Merge branch 'for-upstream/pvhvm' of git://xenbits.xensource.com/people/ianc/linux-2.6

* 'for-upstream/pvhvm' of git://xenbits.xensource.com/people/ianc/linux-2.6:
  xen: pvhvm: make it clearer that XEN_UNPLUG_* define bits in a bitfield
  xen: pvhvm: rename xen_emul_unplug=ignore to =unnnecessary
  xen: pvhvm: allow user to request no emulated device unplug
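For reference, a rough sketch of how the option touched by these commits is passed on a PVHVM guest's kernel command line (value names taken from the commit subjects and kernel-parameters documentation; treat the exact list as an assumption, the semantics live in the Xen platform-pci unplug code):

    # keep the emulated devices even though PV drivers are present
    xen_emul_unplug=unnecessary
    # never unplug, even if the Xen platform PCI device is found
    xen_emul_unplug=never
    # unplug emulated IDE disks and NICs; the values name bits in a
    # bitfield, so several may be combined
    xen_emul_unplug=ide-disks,nics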
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 084f6ad..0b1a3f9 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1922,9 +1922,12 @@
       <function>mutex_lock()</function>
       </para>
       <para>
-       There is a <function>mutex_trylock()</function> which can be
-       used inside interrupt context, as it will not sleep.
+       There is a <function>mutex_trylock()</function> which does not
+       sleep.  Still, it must not be used inside interrupt context since
+       its implementation is not safe for that.
        <function>mutex_unlock()</function> will also never sleep.
+       It cannot be used in interrupt context either since a mutex
+       must be released by the same task that acquired it.
       </para>
      </listitem>
     </itemizedlist>
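A minimal C sketch of the rule the new wording spells out (handler and lock names are hypothetical, not part of the patch): in interrupt context use a spinlock trylock instead, and keep mutex_lock()/mutex_unlock() pairs in the same task.

    #include <linux/interrupt.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static DEFINE_MUTEX(cfg_mutex);      /* process context only */
    static DEFINE_SPINLOCK(cfg_lock);    /* safe to take in the handler */

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
            /*
             * mutex_trylock(&cfg_mutex) would not sleep here, but it is still
             * forbidden: the mutex implementation is not interrupt-safe, and
             * the matching mutex_unlock() must run in the task that locked it.
             */
            if (spin_trylock(&cfg_lock)) {
                    /* ... touch shared state ... */
                    spin_unlock(&cfg_lock);
            }
            return IRQ_HANDLED;
    }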
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index 1a07fd6..a7952c2 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -2,10 +2,6 @@
 ======================
 
 Supported chips:
-  * Fintek F71808E
-    Prefix: 'f71808fg'
-    Addresses scanned: none, address read from Super I/O config space
-    Datasheet: Not public
   * Fintek F71858FG
     Prefix: 'f71858fg'
     Addresses scanned: none, address read from Super I/O config space
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index f6f8025..1565eefd 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1024,6 +1024,10 @@
 backlight brightness control interface if it detects that the standard
 ACPI interface is available in the ThinkPad.
 
+If you want to use the thinkpad-acpi backlight brightness control
+instead of the generic ACPI video backlight brightness control for some
+reason, you should use the acpi_backlight=vendor kernel parameter.
+
 The brightness_enable module parameter can be used to control whether
 the LCD brightness control feature will be enabled when available.
 brightness_enable=0 forces it to be disabled.  brightness_enable=1
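Illustrative only, not part of the patch: the boot parameter mentioned above and the brightness_enable module option described here would typically be set as follows (the modprobe.d file name is one possible choice):

    # kernel command line: prefer the thinkpad-acpi backlight interface
    acpi_backlight=vendor

    # /etc/modprobe.d/thinkpad_acpi.conf
    options thinkpad_acpi brightness_enable=1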
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 568fa08..302db5d 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -49,40 +49,13 @@
       f) MDIO on GPIOs
       g) SPI busses
 
-  VII - Marvell Discovery mv64[345]6x System Controller chips
-    1) The /system-controller node
-    2) Child nodes of /system-controller
-      a) Marvell Discovery MDIO bus
-      b) Marvell Discovery ethernet controller
-      c) Marvell Discovery PHY nodes
-      d) Marvell Discovery SDMA nodes
-      e) Marvell Discovery BRG nodes
-      f) Marvell Discovery CUNIT nodes
-      g) Marvell Discovery MPSCROUTING nodes
-      h) Marvell Discovery MPSCINTR nodes
-      i) Marvell Discovery MPSC nodes
-      j) Marvell Discovery Watch Dog Timer nodes
-      k) Marvell Discovery I2C nodes
-      l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
-      m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
-      n) Marvell Discovery GPP (General Purpose Pins) nodes
-      o) Marvell Discovery PCI host bridge node
-      p) Marvell Discovery CPU Error nodes
-      q) Marvell Discovery SRAM Controller nodes
-      r) Marvell Discovery PCI Error Handler nodes
-      s) Marvell Discovery Memory Controller nodes
-
-  VIII - Specifying interrupt information for devices
+  VII - Specifying interrupt information for devices
     1) interrupts property
     2) interrupt-parent property
     3) OpenPIC Interrupt Controllers
     4) ISA Interrupt Controllers
 
-  IX - Specifying GPIO information for devices
-    1) gpios property
-    2) gpio-controller nodes
-
-  X - Specifying device power management information (sleep property)
+  VIII - Specifying device power management information (sleep property)
 
   Appendix A - Sample SOC node for MPC8540
 
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.txt
index f93462c..6d8be34 100644
--- a/Documentation/powerpc/hvcs.txt
+++ b/Documentation/powerpc/hvcs.txt
@@ -560,7 +560,7 @@
 distribution company that provided your OS or by posting issues to the
 PowerPC development mailing list at:
 
-linuxppc-dev@ozlabs.org
+linuxppc-dev@lists.ozlabs.org
 
 This request is to provide a documented and searchable public exchange
 of the problems and solutions surrounding this driver for the benefit of
diff --git a/MAINTAINERS b/MAINTAINERS
index b5b8baa..a1df54b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,9 +454,20 @@
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/
 
+ANALOG DEVICES INC ASOC DRIVERS
+L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:	http://blackfin.uclinux.org/
+S:	Supported
+F:	sound/soc/blackfin/*
+F:	sound/soc/codecs/ad1*
+F:	sound/soc/codecs/adau*
+F:	sound/soc/codecs/adav*
+F:	sound/soc/codecs/ssm*
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
 F:	sound/aoa/
@@ -1472,8 +1483,8 @@
 
 CELL BROADBAND ENGINE ARCHITECTURE
 M:	Arnd Bergmann <arnd@arndb.de>
-L:	linuxppc-dev@ozlabs.org
-L:	cbe-oss-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
+L:	cbe-oss-dev@lists.ozlabs.org
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 F:	arch/powerpc/include/asm/cell*.h
@@ -2371,13 +2382,13 @@
 FREESCALE DMA DRIVER
 M:	Li Yang <leoli@freescale.com>
 M:	Zhang Wei <zw@zh-kernel.org>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/dma/fsldma.*
 
 FREESCALE I2C CPM DRIVER
 M:	Jochen Friedrich <jochen@scram.de>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-cpm.c
@@ -2393,7 +2404,7 @@
 FREESCALE SOC FS_ENET DRIVER
 M:	Pantelis Antoniou <pantelis.antoniou@gmail.com>
 M:	Vitaly Bordug <vbordug@ru.mvista.com>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/fs_enet/
@@ -2401,7 +2412,7 @@
 
 FREESCALE QUICC ENGINE LIBRARY
 M:	Timur Tabi <timur@freescale.com>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
 F:	arch/powerpc/sysdev/qe_lib/
 F:	arch/powerpc/include/asm/*qe.h
@@ -2409,27 +2420,27 @@
 FREESCALE USB PERIPHERAL DRIVERS
 M:	Li Yang <leoli@freescale.com>
 L:	linux-usb@vger.kernel.org
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/usb/gadget/fsl*
 
 FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
 M:	Li Yang <leoli@freescale.com>
 L:	netdev@vger.kernel.org
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/net/ucc_geth*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
 M:	Timur Tabi <timur@freescale.com>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
 F:	drivers/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:	Timur Tabi <timur@freescale.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
 F:	sound/soc/fsl/fsl*
 F:	sound/soc/fsl/mpc8610_hpcd.c
@@ -2564,7 +2575,7 @@
 F:	mm/hwpoison-inject.c
 
 HYPERVISOR VIRTUAL CONSOLE DRIVER
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Odd Fixes
 F:	drivers/char/hvc_*
 
@@ -3476,7 +3487,7 @@
 
 LGUEST
 M:	Rusty Russell <rusty@rustcorp.com.au>
-L:	lguest@ozlabs.org
+L:	lguest@lists.ozlabs.org
 W:	http://lguest.ozlabs.org/
 S:	Maintained
 F:	Documentation/lguest/
@@ -3495,7 +3506,7 @@
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 M:	Paul Mackerras <paulus@samba.org>
 W:	http://www.penguinppc.org/
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
 S:	Supported
@@ -3505,14 +3516,14 @@
 LINUX FOR POWER MACINTOSH
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 W:	http://www.penguinppc.org/
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	arch/powerpc/platforms/powermac/
 F:	drivers/macintosh/
 
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:	Grant Likely <grant.likely@secretlab.ca>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
 F:	arch/powerpc/platforms/512x/
@@ -3522,7 +3533,7 @@
 M:	Josh Boyer <jwboyer@linux.vnet.ibm.com>
 M:	Matt Porter <mporter@kernel.crashing.org>
 W:	http://www.penguinppc.org/
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
 S:	Maintained
 F:	arch/powerpc/platforms/40x/
@@ -3531,7 +3542,7 @@
 LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
 M:	Grant Likely <grant.likely@secretlab.ca>
 W:	http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
 F:	arch/powerpc/*/*virtex*
@@ -3541,20 +3552,20 @@
 M:	Vitaly Bordug <vitb@kernel.crashing.org>
 M:	Marcelo Tosatti <marcelo@kvack.org>
 W:	http://www.penguinppc.org/
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	arch/powerpc/platforms/8xx/
 
 LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
 M:	Kumar Gala <galak@kernel.crashing.org>
 W:	http://www.penguinppc.org/
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	arch/powerpc/platforms/83xx/
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
 M:	Olof Johansson <olof@lixom.net>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	arch/powerpc/platforms/pasemi/
 F:	drivers/*/*pasemi*
@@ -4601,14 +4612,14 @@
 PS3 NETWORK SUPPORT
 M:	Geoff Levand <geoff@infradead.org>
 L:	netdev@vger.kernel.org
-L:	cbe-oss-dev@ozlabs.org
+L:	cbe-oss-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/net/ps3_gelic_net.*
 
 PS3 PLATFORM SUPPORT
 M:	Geoff Levand <geoff@infradead.org>
-L:	linuxppc-dev@ozlabs.org
-L:	cbe-oss-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
+L:	cbe-oss-dev@lists.ozlabs.org
 S:	Maintained
 F:	arch/powerpc/boot/ps3*
 F:	arch/powerpc/include/asm/lv1call.h
@@ -4622,7 +4633,7 @@
 
 PS3VRAM DRIVER
 M:	Jim Paris <jim@jtan.com>
-L:	cbe-oss-dev@ozlabs.org
+L:	cbe-oss-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/block/ps3vram.c
 
@@ -5068,7 +5079,7 @@
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
 M:	Anton Vorontsov <avorontsov@ru.mvista.com>
-L:	linuxppc-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
 L:	linux-mmc@vger.kernel.org
 S:	Maintained
 F:	drivers/mmc/host/sdhci-of.*
@@ -5485,8 +5496,8 @@
 
 SPU FILE SYSTEM
 M:	Jeremy Kerr <jk@ozlabs.org>
-L:	linuxppc-dev@ozlabs.org
-L:	cbe-oss-dev@ozlabs.org
+L:	linuxppc-dev@lists.ozlabs.org
+L:	cbe-oss-dev@lists.ozlabs.org
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 F:	Documentation/filesystems/spufs.txt
diff --git a/Makefile b/Makefile
index f3bdff8..031b61c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -1408,8 +1408,8 @@
 	$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
 	$(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
 
-kernelrelease: include/config/kernel.release
-	@echo $(KERNELRELEASE)
+kernelrelease:
+	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
 
 kernelversion:
 	@echo $(KERNELVERSION)
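An illustrative run of the reworked target (the -00012-gdeadbee local-version suffix is made up; the real suffix comes from scripts/setlocalversion and the state of the tree):

    $ make -s kernelrelease
    2.6.36-rc2-00012-gdeadbee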
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 88e608a..842dba3 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -387,8 +387,9 @@
  * sys_execve() executes a new program.
  */
 asmlinkage int
-do_sys_execve(const char __user *ufilename, char __user * __user *argv,
-	      char __user * __user *envp, struct pt_regs *regs)
+do_sys_execve(const char __user *ufilename,
+	      const char __user *const __user *argv,
+	      const char __user *const __user *envp, struct pt_regs *regs)
 {
 	int error;
 	char *filename;
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 99b8200..59c1ce8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -21,6 +21,9 @@
 # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
 KBUILD_CFLAGS	+=$(call cc-option,-marm,)
 
+# Never generate .eh_frame
+KBUILD_CFLAGS	+= $(call cc-option,-fno-dwarf2-cfi-asm)
+
 # Do not use arch/arm/defconfig - it's always outdated.
 # Select a platform tht is kept up-to-date
 KBUILD_DEFCONFIG := versatile_defconfig
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index c974be8..7ce15eb 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -158,15 +158,24 @@
  */
 static inline int valid_user_regs(struct pt_regs *regs)
 {
-	if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
-		regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
-		return 1;
+	unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+	/*
+	 * Always clear the F (FIQ) and A (delayed abort) bits
+	 */
+	regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+
+	if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
+		if (mode == USR_MODE)
+			return 1;
+		if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
+			return 1;
 	}
 
 	/*
 	 * Force CPSR to something logical...
 	 */
-	regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
+	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
 	if (!(elf_hwcap & HWCAP_26BIT))
 		regs->ARM_cpsr |= USR_MODE;
 
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index dd2bf53..d02cfb6 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -392,6 +392,7 @@
 #define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
 #define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg			(__NR_SYSCALL_BASE+365)
+#define __NR_accept4			(__NR_SYSCALL_BASE+366)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 37ae301..afeb71f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -375,6 +375,7 @@
 		CALL(sys_rt_tgsigqueueinfo)
 		CALL(sys_perf_event_open)
 /* 365 */	CALL(sys_recvmmsg)
+		CALL(sys_accept4)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7..d6e8b4d 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -79,7 +79,7 @@
 		return;
 
 	/* Initialize to zero */
-	for (regno = 0; regno < GDB_MAX_REGS; regno++)
+	for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
 		gdb_regs[regno] = 0;
 
 	/* Otherwise, we have only some registers from switch_to() */
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 5b7c541..62e7c61 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -62,8 +62,9 @@
 /* sys_execve() executes a new program.
  * This is called indirectly via a small wrapper
  */
-asmlinkage int sys_execve(const char __user *filenamei, char __user * __user *argv,
-			  char __user * __user *envp, struct pt_regs *regs)
+asmlinkage int sys_execve(const char __user *filenamei,
+			  const char __user *const __user *argv,
+			  const char __user *const __user *envp, struct pt_regs *regs)
 {
 	int error;
 	char * filename;
@@ -78,14 +79,17 @@
 	return error;
 }
 
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	struct pt_regs regs;
 	int ret;
 
 	memset(&regs, 0, sizeof(struct pt_regs));
-	ret = do_execve(filename, (char __user * __user *)argv,
-			(char __user * __user *)envp, &regs);
+	ret = do_execve(filename,
+			(const char __user *const __user *)argv,
+			(const char __user *const __user *)envp, &regs);
 	if (ret < 0)
 		goto out;
 
diff --git a/arch/arm/plat-samsung/dev-hsmmc.c b/arch/arm/plat-samsung/dev-hsmmc.c
index b0f93f1..9d2be09 100644
--- a/arch/arm/plat-samsung/dev-hsmmc.c
+++ b/arch/arm/plat-samsung/dev-hsmmc.c
@@ -70,4 +70,6 @@
 		set->cfg_gpio = pd->cfg_gpio;
 	if (pd->cfg_card)
 		set->cfg_card = pd->cfg_card;
+	if (pd->host_caps)
+		set->host_caps = pd->host_caps;
 }
diff --git a/arch/arm/plat-samsung/dev-hsmmc1.c b/arch/arm/plat-samsung/dev-hsmmc1.c
index 1504fd8..a6c8295 100644
--- a/arch/arm/plat-samsung/dev-hsmmc1.c
+++ b/arch/arm/plat-samsung/dev-hsmmc1.c
@@ -70,4 +70,6 @@
 		set->cfg_gpio = pd->cfg_gpio;
 	if (pd->cfg_card)
 		set->cfg_card = pd->cfg_card;
+	if (pd->host_caps)
+		set->host_caps = pd->host_caps;
 }
diff --git a/arch/arm/plat-samsung/dev-hsmmc2.c b/arch/arm/plat-samsung/dev-hsmmc2.c
index b28ef17..cb0d714 100644
--- a/arch/arm/plat-samsung/dev-hsmmc2.c
+++ b/arch/arm/plat-samsung/dev-hsmmc2.c
@@ -71,4 +71,6 @@
 		set->cfg_gpio = pd->cfg_gpio;
 	if (pd->cfg_card)
 		set->cfg_card = pd->cfg_card;
+	if (pd->host_caps)
+		set->host_caps = pd->host_caps;
 }
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index e5daddf..9c46aaa 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -384,8 +384,9 @@
 }
 
 asmlinkage int sys_execve(const char __user *ufilename,
-			  char __user *__user *uargv,
-			  char __user *__user *uenvp, struct pt_regs *regs)
+			  const char __user *const __user *uargv,
+			  const char __user *const __user *uenvp,
+			  struct pt_regs *regs)
 {
 	int error;
 	char *filename;
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
index 459349b..62635a0 100644
--- a/arch/avr32/kernel/sys_avr32.c
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -7,7 +7,9 @@
  */
 #include <linux/unistd.h>
 
-int kernel_execve(const char *file, char **argv, char **envp)
+int kernel_execve(const char *file,
+		  const char *const *argv,
+		  const char *const *envp)
 {
 	register long scno asm("r8") = __NR_execve;
 	register long sc1 asm("r12") = (long)file;
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index d5872cd..3f7ef4d 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -22,7 +22,9 @@
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
+
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
@@ -115,7 +117,7 @@
  * of bits set) of a N-bit word
  */
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
 	unsigned int res;
 
@@ -125,19 +127,20 @@
 	return res;
 }
 
-static inline unsigned int hweight64(__u64 w)
+static inline unsigned int __arch_hweight64(__u64 w)
 {
-	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+	return __arch_hweight32((unsigned int)(w >> 32)) +
+	       __arch_hweight32((unsigned int)w);
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
-	return hweight32(w & 0xffff);
+	return __arch_hweight32(w & 0xffff);
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
-	return hweight32(w & 0xff);
+	return __arch_hweight32(w & 0xff);
 }
 
 #endif				/* _BLACKFIN_BITOPS_H */
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 22886cb..14fcd25 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -389,8 +389,11 @@
 #define __NR_rt_tgsigqueueinfo	368
 #define __NR_perf_event_open	369
 #define __NR_recvmmsg		370
+#define __NR_fanotify_init	371
+#define __NR_fanotify_mark	372
+#define __NR_prlimit64		373
 
-#define __NR_syscall		371
+#define __NR_syscall		374
 #define NR_syscalls		__NR_syscall
 
 /* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index a566f61..01f98cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -209,7 +209,9 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp)
+asmlinkage int sys_execve(const char __user *name,
+			  const char __user *const __user *argv,
+			  const char __user *const __user *envp)
 {
 	int error;
 	char *filename;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a5847f5..af1bffa 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1628,6 +1628,9 @@
 	.long _sys_rt_tgsigqueueinfo
 	.long _sys_perf_event_open
 	.long _sys_recvmmsg		/* 370 */
+	.long _sys_fanotify_init
+	.long _sys_fanotify_mark
+	.long _sys_prlimit64
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index 93f0f64..9a57db6 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -204,7 +204,9 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char *fname, char **argv, char **envp,
+asmlinkage int sys_execve(const char *fname,
+			  const char *const *argv,
+			  const char *const *envp,
 			  long r13, long mof, long srp, 
 			  struct pt_regs *regs)
 {
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 2661a95..562f847 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -218,8 +218,10 @@
 
 /* sys_execve() executes a new program. */
 asmlinkage int
-sys_execve(const char *fname, char **argv, char **envp, long r13, long mof, long srp,
-	struct pt_regs *regs)
+sys_execve(const char *fname,
+	   const char *const *argv,
+	   const char *const *envp, long r13, long mof, long srp,
+	   struct pt_regs *regs)
 {
 	int error;
 	char *filename;
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 428931c..2b63b01 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -250,8 +250,9 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char __user *name, char __user * __user *argv,
-			  char __user * __user *envp)
+asmlinkage int sys_execve(const char __user *name,
+			  const char __user *const __user *argv,
+			  const char __user *const __user *envp)
 {
 	int error;
 	char * filename;
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 8b7b78d..9747813 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -212,7 +212,10 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char *name, char **argv, char **envp,int dummy,...)
+asmlinkage int sys_execve(const char *name,
+			  const char *const *argv,
+			  const char *const *envp,
+			  int dummy, ...)
 {
 	int error;
 	char * filename;
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index f9b3f44..dc1ac02 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -51,7 +51,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long res __asm__("er0");
 	register char *const *_c __asm__("er3") = envp;
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 87f1bd1..954d398 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -356,8 +356,6 @@
 				int fd, long pgoff);
 struct pt_regs;
 struct sigaction;
-long sys_execve(const char __user *filename, char __user * __user *argv,
-			   char __user * __user *envp, struct pt_regs *regs);
 asmlinkage long sys_ia64_pipe(void);
 asmlinkage long sys_rt_sigaction(int sig,
 				 const struct sigaction __user *act,
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a879c03..16f1c7b 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -633,7 +633,9 @@
 }
 
 long
-sys_execve (const char __user *filename, char __user * __user *argv, char __user * __user *envp,
+sys_execve (const char __user *filename,
+	    const char __user *const __user *argv,
+	    const char __user *const __user *envp,
 	    struct pt_regs *regs)
 {
 	char *fname;
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 8665a4d..422bea9 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -289,8 +289,8 @@
  * sys_execve() executes a new program.
  */
 asmlinkage int sys_execve(const char __user *ufilename,
-			  char __user * __user *uargv,
-			  char __user * __user *uenvp,
+			  const char __user *const __user *uargv,
+			  const char __user *const __user *uenvp,
 			  unsigned long r3, unsigned long r4, unsigned long r5,
 			  unsigned long r6, struct pt_regs regs)
 {
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 0a00f46..d841fb6 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -93,7 +93,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long __scno __asm__ ("r7") = __NR_execve;
 	register long __arg3 __asm__ ("r2") = (long)(envp);
diff --git a/arch/m68k/include/asm/ide.h b/arch/m68k/include/asm/ide.h
index 3958726..492fee8 100644
--- a/arch/m68k/include/asm/ide.h
+++ b/arch/m68k/include/asm/ide.h
@@ -1,6 +1,4 @@
 /*
- *  linux/include/asm-m68k/ide.h
- *
  *  Copyright (C) 1994-1996  Linus Torvalds & authors
  */
 
@@ -34,6 +32,8 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#ifdef CONFIG_MMU
+
 /*
  * Get rid of defs from io.h - ide has its private and conflicting versions
  * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
@@ -53,5 +53,14 @@
 #define __ide_mm_outsw(port, addr, n)	raw_outsw((u16 *)port, addr, n)
 #define __ide_mm_outsl(port, addr, n)	raw_outsl((u32 *)port, addr, n)
 
+#else
+
+#define __ide_mm_insw(port, addr, n)	io_insw((unsigned int)port, addr, n)
+#define __ide_mm_insl(port, addr, n)	io_insl((unsigned int)port, addr, n)
+#define __ide_mm_outsw(port, addr, n)	io_outsw((unsigned int)port, addr, n)
+#define __ide_mm_outsl(port, addr, n)	io_outsl((unsigned int)port, addr, n)
+
+#endif /* CONFIG_MMU */
+
 #endif /* __KERNEL__ */
 #endif /* _M68K_IDE_H */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 221d0b7..18732ab 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -315,7 +315,9 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp)
+asmlinkage int sys_execve(const char __user *name,
+			  const char __user *const __user *argv,
+			  const char __user *const __user *envp)
 {
 	int error;
 	char * filename;
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 7789669..2f431ec 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -459,7 +459,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long __res asm ("%d0") = __NR_execve;
 	register long __a asm ("%d1") = (long)(filename);
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 6350f68..6d33905 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -316,14 +316,14 @@
 		fp->d0, fp->d1, fp->d2, fp->d3);
 	printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
 		fp->d4, fp->d5, fp->a0, fp->a1);
-	printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %08x\n",
-		(unsigned int) rdusp(), (unsigned int) fp);
+	printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %p\n",
+		(unsigned int) rdusp(), fp);
 
 	printk(KERN_EMERG "\nCODE:");
 	tp = ((unsigned char *) fp->pc) - 0x20;
 	for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "%08x: ", (int) (tp + i));
+			printk(KERN_EMERG "%p: ", tp + i);
 		printk("%08x ", (int) *sp++);
 	}
 	printk(KERN_EMERG "\n");
@@ -332,7 +332,7 @@
 	tp = ((unsigned char *) fp) - 0x40;
 	for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "%08x: ", (int) (tp + i));
+			printk(KERN_EMERG "%p: ", tp + i);
 		printk("%08x ", (int) *sp++);
 	}
 	printk(KERN_EMERG "\n");
@@ -341,7 +341,7 @@
 	tp = (unsigned char *) (rdusp() - 0x10);
 	for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "%08x: ", (int) (tp + i));
+			printk(KERN_EMERG "%p: ", tp + i);
 		printk("%08x ", (int) *sp++);
 	}
 	printk(KERN_EMERG "\n");
@@ -350,7 +350,9 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(const char *name, char **argv, char **envp)
+asmlinkage int sys_execve(const char *name,
+			  const char *const *argv,
+			  const char *const *envp)
 {
 	int error;
 	char * filename;
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index d65e9c4..68488ae 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -44,7 +44,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long __res asm ("%d0") = __NR_execve;
 	register long __a asm ("%d1") = (long)(filename);
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index d33ba17..99d9b61 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -73,7 +73,7 @@
 		/* We can only get here if we hit a P2P bridge with no node,
 		 * let's do standard swizzling and try again
 		 */
-		lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+		lspec = pci_swizzle_interrupt_pin(pdev, lspec);
 		pdev = ppdev;
 	}
 
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 6abab6e..2250fe9 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -47,8 +47,10 @@
 	return do_fork(flags, stack, regs, 0, NULL, NULL);
 }
 
-asmlinkage long microblaze_execve(const char __user *filenamei, char __user *__user *argv,
-			char __user *__user *envp, struct pt_regs *regs)
+asmlinkage long microblaze_execve(const char __user *filenamei,
+				  const char __user *const __user *argv,
+				  const char __user *const __user *envp,
+				  struct pt_regs *regs)
 {
 	int error;
 	char *filename;
@@ -77,7 +79,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register const char *__a __asm__("r5") = filename;
 	register const void *__b __asm__("r6") = argv;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 23be25f..55ef532 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -27,10 +27,11 @@
 #include <linux/irq.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 
@@ -1077,7 +1078,7 @@
 		struct dev_archdata *sd = &dev->dev.archdata;
 
 		/* Setup OF node pointer in archdata */
-		sd->of_node = pci_device_to_OF_node(dev);
+		dev->dev.of_node = pci_device_to_OF_node(dev);
 
 		/* Fixup NUMA node as it may not be setup yet by the generic
 		 * code and is needed by the DMA init
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
index 7869a41..0687a42 100644
--- a/arch/microblaze/pci/xilinx_pci.c
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -16,6 +16,7 @@
 
 #include <linux/ioport.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <asm/io.h>
 
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index bddce0b..1dc6edf 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -258,8 +258,10 @@
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
-	                  (char __user *__user *) (long)regs.regs[6], &regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *) (long)regs.regs[5],
+	                  (const char __user *const __user *) (long)regs.regs[6],
+			  &regs);
 	putname(filename);
 
 out:
@@ -436,7 +438,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register unsigned long __a0 asm("$4") = (unsigned long) filename;
 	register unsigned long __a1 asm("$5") = (unsigned long) argv;
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 762eb32..f48373e 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -269,8 +269,8 @@
 }
 
 asmlinkage long sys_execve(const char __user *name,
-			   char __user * __user *argv,
-			   char __user * __user *envp)
+			   const char __user *const __user *argv,
+			   const char __user *const __user *envp)
 {
 	char *filename;
 	int error;
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 4e34880..159acb0 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -25,7 +25,8 @@
 	unsigned long addr;
 	void *ret;
 
-	printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
+	pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
+		 dev ? dev_name(dev) : "?", size, gfp);
 
 	if (0xbe000000 - pci_sram_allocated >= size) {
 		size = (size + 255) & ~255;
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 1444875..0dc8543 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -41,8 +41,10 @@
 	if (IS_ERR(filename))
 		goto out;
 
-	error = do_execve(filename, (char __user * __user *) regs->gr[25],
-		(char __user * __user *) regs->gr[24], regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *) regs->gr[25],
+			  (const char __user *const __user *) regs->gr[24],
+			  regs);
 
 	putname(filename);
 
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 76332da..4b4b918 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -348,17 +348,22 @@
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	error = do_execve(filename, (char __user * __user *) regs->gr[25],
-		(char __user * __user *) regs->gr[24], regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *) regs->gr[25],
+			  (const char __user *const __user *) regs->gr[24],
+			  regs);
 	putname(filename);
 out:
 
 	return error;
 }
 
-extern int __execve(const char *filename, char *const argv[],
-		char *const envp[], struct task_struct *task);
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+extern int __execve(const char *filename,
+		    const char *const argv[],
+		    const char *const envp[], struct task_struct *task);
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	return __execve(filename, argv, envp, current);
 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index feacfb7..91356ff 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1034,8 +1034,9 @@
 	flush_fp_to_thread(current);
 	flush_altivec_to_thread(current);
 	flush_spe_to_thread(current);
-	error = do_execve(filename, (char __user * __user *) a1,
-			  (char __user * __user *) a2, regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *) a1,
+			  (const char __user *const __user *) a2, regs);
 	putname(filename);
 out:
 	return error;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7eafaf2..d3a2d1c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -267,8 +267,9 @@
 /*
  * sys_execve() executes a new program.
  */
-SYSCALL_DEFINE3(execve, const char __user *, name, char __user * __user *, argv,
-		char __user * __user *, envp)
+SYSCALL_DEFINE3(execve, const char __user *, name,
+		const char __user *const __user *, argv,
+		const char __user *const __user *, envp)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	char *filename;
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 651096f..e478bf9 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -99,8 +99,10 @@
 	if (IS_ERR(filename))
 		return error;
 
-	error = do_execve(filename, (char __user *__user*)regs->regs[5],
-			  (char __user *__user *) regs->regs[6], regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *)regs->regs[5],
+			  (const char __user *const __user *)regs->regs[6],
+			  regs);
 
 	putname(filename);
 	return error;
@@ -110,7 +112,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register unsigned long __r4 asm("r4") = (unsigned long) filename;
 	register unsigned long __r5 asm("r5") = (unsigned long) argv;
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0529819..762a139 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -296,9 +296,10 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
-			  char __user * __user *uenvp, unsigned long r7,
-			  struct pt_regs __regs)
+asmlinkage int sys_execve(const char __user *ufilename,
+			  const char __user *const __user *uargv,
+			  const char __user *const __user *uenvp,
+			  unsigned long r7, struct pt_regs __regs)
 {
 	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	int error;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 68d128d..210c1ca 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -497,8 +497,8 @@
 		goto out;
 
 	error = do_execve(filename,
-			  (char __user * __user *)uargv,
-			  (char __user * __user *)uenvp,
+			  (const char __user *const __user *)uargv,
+			  (const char __user *const __user *)uenvp,
 			  pregs);
 	putname(filename);
 out:
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index eb68bfd..f56b6fe5 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -71,7 +71,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long __sc0 __asm__ ("r3") = __NR_execve;
 	register long __sc4 __asm__ ("r4") = (long) filename;
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
index 2872357..c5a38c4 100644
--- a/arch/sh/kernel/sys_sh64.c
+++ b/arch/sh/kernel/sys_sh64.c
@@ -33,7 +33,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
 	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 2050ca0..f0c7422 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -25,9 +25,9 @@
 extern void atomic64_sub(int, atomic64_t *);
 
 extern int atomic_add_ret(int, atomic_t *);
-extern int atomic64_add_ret(int, atomic64_t *);
+extern long atomic64_add_ret(int, atomic64_t *);
 extern int atomic_sub_ret(int, atomic_t *);
-extern int atomic64_sub_ret(int, atomic64_t *);
+extern long atomic64_sub_ret(int, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -91,7 +91,7 @@
 	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h
index e834880..2173432 100644
--- a/arch/sparc/include/asm/fb.h
+++ b/arch/sparc/include/asm/fb.h
@@ -1,5 +1,6 @@
 #ifndef _SPARC_FB_H_
 #define _SPARC_FB_H_
+#include <linux/console.h>
 #include <linux/fb.h>
 #include <linux/fs.h>
 #include <asm/page.h>
@@ -18,6 +19,9 @@
 	struct device *dev = info->device;
 	struct device_node *node;
 
+	if (console_set_on_cmdline)
+		return 0;
+
 	node = dev->of_node;
 	if (node &&
 	    node == of_console_device)
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
index a303c9d..e4c61a1 100644
--- a/arch/sparc/include/asm/rwsem-const.h
+++ b/arch/sparc/include/asm/rwsem-const.h
@@ -5,7 +5,7 @@
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 #define RWSEM_ACTIVE_BIAS		0x00000001
 #define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		0xffff0000
+#define RWSEM_WAITING_BIAS		(-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d0b3b01..03eb5a8 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -397,8 +397,11 @@
 #define __NR_rt_tgsigqueueinfo	326
 #define __NR_perf_event_open	327
 #define __NR_recvmmsg		328
+#define __NR_fanotify_init	329
+#define __NR_fanotify_mark	330
+#define __NR_prlimit64		331
 
-#define NR_syscalls		329
+#define NR_syscalls		332
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 40e29fc..1752929 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -633,8 +633,10 @@
 	if(IS_ERR(filename))
 		goto out;
 	error = do_execve(filename,
-			  (char __user * __user *)regs->u_regs[base + UREG_I1],
-			  (char __user * __user *)regs->u_regs[base + UREG_I2],
+			  (const char __user *const  __user *)
+			  regs->u_regs[base + UREG_I1],
+			  (const char __user *const  __user *)
+			  regs->u_regs[base + UREG_I2],
 			  regs);
 	putname(filename);
 out:
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index dbe81a3..485f547 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -739,9 +739,9 @@
 	if (IS_ERR(filename))
 		goto out;
 	error = do_execve(filename,
-			  (char __user * __user *)
+			  (const char __user *const __user *)
 			  regs->u_regs[base + UREG_I1],
-			  (char __user * __user *)
+			  (const char __user *const __user *)
 			  regs->u_regs[base + UREG_I2], regs);
 	putname(filename);
 	if (!error) {
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 46a76ba..44e5faf 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -330,6 +330,15 @@
 	nop
 	nop
 
+	.globl		sys32_fanotify_mark
+sys32_fanotify_mark:
+	sethi		%hi(sys_fanotify_mark), %g1
+	sllx		%o2, 32, %o2
+	or		%o2, %o3, %o2
+	mov		%o4, %o3
+	jmpl		%g1 + %lo(sys_fanotify_mark), %g0
+	 mov		%o5, %o4
+
 	.section	__ex_table,"a"
 	.align		4
 	.word		1b, __retl_efault, 2b, __retl_efault
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index ee995b7..5079413 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -282,7 +282,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	long __res;
 	register long __g1 __asm__ ("g1") = __NR_execve;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 3d435c4..f836f4e 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -758,7 +758,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	long __res;
 	register long __g1 __asm__ ("g1") = __NR_execve;
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 801fc8e..ec396e1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,6 @@
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
+/*330*/	.long sys_fanotify_mark, sys_prlimit64
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 9db058d..8cfcaa5 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,8 @@
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
+/*330*/	.word sys32_fanotify_mark, sys_prlimit64
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +159,5 @@
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
+/*330*/	.word sys_fanotify_mark, sys_prlimit64
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index ed590ad..985cc28c 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -543,8 +543,9 @@
 /*
  * sys_execve() executes a new program.
  */
-long _sys_execve(char __user *path, char __user *__user *argv,
-		 char __user *__user *envp, struct pt_regs *regs)
+long _sys_execve(const char __user *path,
+		 const char __user *const __user *argv,
+		 const char __user *const __user *envp, struct pt_regs *regs)
 {
 	long error;
 	char *filename;
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
index 17a2cb5..1f469e80 100644
--- a/arch/um/include/asm/dma-mapping.h
+++ b/arch/um/include/asm/dma-mapping.h
@@ -95,13 +95,6 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	BUG();
-	return(0);
-}
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 59b20d9..cd145ed 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -44,8 +44,9 @@
 	PT_REGS_SP(regs) = esp;
 }
 
-static long execve1(const char *file, char __user * __user *argv,
-		    char __user *__user *env)
+static long execve1(const char *file,
+		    const char __user *const __user *argv,
+		    const char __user *const __user *env)
 {
 	long error;
 
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 7427c0b..5ddb246 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -51,7 +51,9 @@
 	return err;
 }
 
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	mm_segment_t fs;
 	int ret;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a84fc34..cea0cd9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,6 +245,11 @@
 
 config KTIME_SCALAR
 	def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+	def_bool y
+	depends on HOTPLUG_CPU
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -749,11 +754,11 @@
 	def_bool (AMD_IOMMU || DMAR)
 
 config MAXSMP
-	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
+	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
 	select CPUMASK_OFFSTACK
 	---help---
-	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
+	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
 config NR_CPUS
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 2984a25..f686f49 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -26,6 +26,7 @@
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index feb2ff9b..f1d8b44 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -23,8 +23,9 @@
 /* kernel/process.c */
 int sys_fork(struct pt_regs *);
 int sys_vfork(struct pt_regs *);
-long sys_execve(const char __user *, char __user * __user *,
-		char __user * __user *, struct pt_regs *);
+long sys_execve(const char __user *,
+		const char __user *const __user *,
+		const char __user *const __user *, struct pt_regs *);
 long sys_clone(unsigned long, unsigned long, void __user *,
 	       void __user *, struct pt_regs *);
 
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index cb507bb..4dde797 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,14 +13,17 @@
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4dc0084..f1efeba 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1728,6 +1728,8 @@
 		struct irq_pin_list *entry;
 
 		cfg = desc->chip_data;
+		if (!cfg)
+			continue;
 		entry = cfg->irq_2_pin;
 		if (!entry)
 			continue;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 60a57b1..ba5f62f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@
 	}
 
 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 8) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_mask;
 	while ((range = *erratum++))
 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 214ac86..d8d86d0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -491,33 +491,78 @@
  *   Intel Errata AAP53  (model 30)
  *   Intel Errata BD53   (model 44)
  *
- * These chips need to be 'reset' when adding counters by programming
- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
- * either in sequence on the same PMC or on different PMCs.
+ * The official story:
+ *   These chips need to be 'reset' when adding counters by programming the
+ *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
+ *   in sequence on the same PMC or on different PMCs.
+ *
+ * In practice it appears some of these events do in fact count, and
+ * we need to program all 4 events.
  */
+static void intel_pmu_nhm_workaround(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	static const unsigned long nhm_magic[4] = {
+		0x4300B5,
+		0x4300D2,
+		0x4300B1,
+		0x4300B1
+	};
+	struct perf_event *event;
+	int i;
+
+	/*
+	 * The errata require the steps below:
+	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
+	 *    the corresponding PMCx;
+	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
+	 */
+
+	/*
+	 * The real steps we choose are a little different from above.
+	 * A) To reduce MSR operations, we don't run step 1) as they
+	 *    are already cleared before this function is called;
+	 * B) Call x86_perf_event_update to save PMCx before configuring
+	 *    PERFEVTSELx with magic number;
+	 * C) With step 5), we do clear only when the PERFEVTSELx is
+	 *    not used currently.
+	 * D) Call x86_perf_event_set_period to restore PMCx;
+	 */
+
+	/* We always operate 4 pairs of PERF Counters */
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
+		if (event)
+			x86_perf_event_update(event);
+	}
+
+	for (i = 0; i < 4; i++) {
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
+		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
+	}
+
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
+
+		if (event) {
+			x86_perf_event_set_period(event);
+			__x86_pmu_enable_event(&event->hw,
+					ARCH_PERFMON_EVENTSEL_ENABLE);
+		} else
+			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
+	}
+}
+
 static void intel_pmu_nhm_enable_all(int added)
 {
-	if (added) {
-		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-		int i;
-
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
-
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
-
-		for (i = 0; i < 3; i++) {
-			struct perf_event *event = cpuc->events[i];
-
-			if (!event)
-				continue;
-
-			__x86_pmu_enable_event(&event->hw,
-					       ARCH_PERFMON_EVENTSEL_ENABLE);
-		}
-	}
+	if (added)
+		intel_pmu_nhm_workaround();
 	intel_pmu_enable_all(added);
 }
 
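
For reference, the sketch below spells out the erratum's literal five-step
sequence from the comment above; the function name is hypothetical, and the
in-tree intel_pmu_nhm_workaround() deliberately deviates from it so it can
save and restore live counters and skip MSR writes that are already clear.

static void nhm_erratum_literal_sequence(void)
{
	static const unsigned long magic[4] = {
		0x4300B5, 0x4300D2, 0x4300B1, 0x4300B1
	};
	int i;

	/* 1) Clear PEBS enable and the global counter enable */
	wrmsrl(MSR_IA32_PEBS_ENABLE, 0x0);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	/* 2) Program the magic events and clear the counters */
	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	/* 3) Enable counters 0-3, then 4) disable them again */
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	/* 5) Clear all 4 pairs of PERFEVTSELx and PMCx */
	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}
}
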
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ff4c453..fa8c1b8 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@
 /*
  * Enable paging
  */
-	movl $pa(swapper_pg_dir),%eax
+	movl pa(initial_page_table), %eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
 	orl  $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@
 .align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
+ENTRY(initial_page_table)
+	.long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@
 #endif
 swapper_pg_fixmap:
 	.fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+	.fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f11f5c..a46cb35 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,6 +40,7 @@
 
 static unsigned int		mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ef10940..852b819 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -194,7 +194,7 @@
 	unsigned long		addr;
 	int			len;
 	int			type;
-	struct perf_event	**pev;
+	struct perf_event	* __percpu *pev;
 } breakinfo[HBP_NUM];
 
 static unsigned long early_dr7;
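
The annotation change above only affects static checking: pev now carries the
__percpu address-space marker matching what register_wide_hw_breakpoint()
returns. A minimal sketch of how such a pointer is dereferenced (the helper
name is hypothetical):

/* Fetch the breakpoint event installed in a given slot on a given CPU.
 * breakinfo[].pev is the per-cpu array returned by
 * register_wide_hw_breakpoint(); per_cpu_ptr() resolves it for one CPU.
 */
static struct perf_event *hw_break_event_for_cpu(int slot, int cpu)
{
	return *per_cpu_ptr(breakinfo[slot].pev, cpu);
}
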
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf..770ebfb 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -740,15 +741,7 @@
 			/* another task is sharing our hash bucket */
 			continue;
 
-		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
-		}
-
 		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -761,6 +754,32 @@
 
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (ri->rp && ri->rp->handler) {
+			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
+			ri->rp->handler(ri, regs);
+			__get_cpu_var(current_kprobe) = NULL;
+		}
+
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 64ecaf0..57d1868 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,8 +301,9 @@
 /*
  * sys_execve() executes a new program.
  */
-long sys_execve(const char __user *name, char __user * __user *argv,
-		char __user * __user *envp, struct pt_regs *regs)
+long sys_execve(const char __user *name,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp, struct pt_regs *regs)
 {
 	long error;
 	char *filename;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b008e78..c3a4fbb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1014,6 +1014,8 @@
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+	setup_trampoline_page_table();
+
 	tboot_probe();
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a5e928b..8b3bfc4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -91,6 +90,25 @@
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this to protect trampoline_base from concurrent accesses when
+ * CPUs are being offlined and onlined repeatedly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock(void)
+{
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock(void)
+{
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
@@ -281,6 +299,18 @@
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Switch away from the trampoline page-table
+	 *
+	 * Do this before cpu_init() because it needs to access per-cpu
+	 * data which may not be mapped in the trampoline page-table.
+	 */
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
+#endif
+
 	vmi_bringup();
 	cpu_init();
 	preempt_disable();
@@ -299,12 +329,6 @@
 		legacy_pic->chip->unmask(0);
 	}
 
-#ifdef CONFIG_X86_32
-	while (low_mappings)
-		cpu_relax();
-	__flush_tlb_all();
-#endif
-
 	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
@@ -750,6 +774,7 @@
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
+	initial_page_table = __pa(&trampoline_pg_dir);
 #else
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-	flush_tlb_all();
-	low_mappings = 1;
-
 	err = do_boot_cpu(apicid, cpu);
 
-	zap_low_mappings(false);
-	low_mappings = 0;
-#else
-	err = do_boot_cpu(apicid, cpu);
-#endif
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 196552b..d5e0662 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -28,7 +28,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	long __res;
 	asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef6..a874495 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+	/* Copy kernel address range */
+	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+
+	/* Initialize low mappings */
+	clone_pgd_range(trampoline_pg_dir,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+#endif
+}
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0fd6378..ddeb231 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -697,6 +697,7 @@
 	pit->wq = create_singlethread_workqueue("kvm-pit-wq");
 	if (!pit->wq) {
 		mutex_unlock(&pit->pit_state.lock);
+		kvm_free_irq_source_id(kvm, pit->irq_source_id);
 		kfree(pit);
 		return NULL;
 	}
@@ -742,7 +743,7 @@
 	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
-
+	destroy_workqueue(pit->wq);
 	kfree(pit);
 	return NULL;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 25f1907..3a09c62 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2387,7 +2387,7 @@
 	if (cpu_has_xsave)
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->xsave,
-			sizeof(struct xsave_struct));
+			xstate_size);
 	else {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->fxsave,
@@ -2405,7 +2405,7 @@
 
 	if (cpu_has_xsave)
 		memcpy(&vcpu->arch.guest_fpu.state->xsave,
-			guest_xsave->region, sizeof(struct xsave_struct));
+			guest_xsave->region, xstate_size);
 	else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 7c2f38f..e3558b9 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -318,8 +318,9 @@
  */
 
 asmlinkage
-long xtensa_execve(const char __user *name, char __user * __user *argv,
-                   char __user * __user *envp,
+long xtensa_execve(const char __user *name,
+		   const char __user *const __user *argv,
+                   const char __user *const __user *envp,
                    long a3, long a4, long a5,
                    struct pt_regs *regs)
 {
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ea24c1e..2673a3d 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1588,7 +1588,7 @@
 	},
 };
 
-static int sata_dwc_probe(struct of_device *ofdev,
+static int sata_dwc_probe(struct platform_device *ofdev,
 			const struct of_device_id *match)
 {
 	struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@
 	return err;
 }
 
-static int sata_dwc_remove(struct of_device *ofdev)
+static int sata_dwc_remove(struct platform_device *ofdev)
 {
 	struct device *dev = &ofdev->dev;
 	struct ata_host *host = dev_get_drvdata(dev);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 2982b3e..057413b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -94,6 +94,7 @@
 #include <linux/hdreg.h>
 #include <linux/platform_device.h>
 #if defined(CONFIG_OF)
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index ddf5def..710af89 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -819,13 +819,16 @@
 	    "Sandybridge", NULL, &intel_gen6_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
 	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
 	{ 0, 0, NULL, NULL, NULL }
 };
 
 static int __devinit intel_gmch_probe(struct pci_dev *pdev,
 				      struct agp_bridge_data *bridge)
 {
-	int i;
+	int i, mask;
+
 	bridge->driver = NULL;
 
 	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -845,14 +848,19 @@
 
 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
-	if (bridge->driver->mask_memory == intel_i965_mask_memory) {
-		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
-			dev_err(&intel_private.pcidev->dev,
-				"set gfx device dma mask 36bit failed!\n");
-		else
-			pci_set_consistent_dma_mask(intel_private.pcidev,
-						    DMA_BIT_MASK(36));
-	}
+	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+		mask = 40;
+	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+		mask = 36;
+	else
+		mask = 32;
+
+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		dev_err(&intel_private.pcidev->dev,
+			"set gfx device dma mask %d-bit failed!\n", mask);
+	else
+		pci_set_consistent_dma_mask(intel_private.pcidev,
+					    DMA_BIT_MASK(mask));
 
 	return 1;
 }
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c05e3e5..08d4753 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -204,6 +204,7 @@
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG  0x0126
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index ad46eae..c350d01 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -675,8 +675,8 @@
 	}
 
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-	filp->private_data = tty;
-	file_move(filp, &tty->tty_files);
+
+	tty_add_file(tty, filp);
 
 	retval = devpts_pty_new(inode, tty->link);
 	if (retval)
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0350c42..949067a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -136,6 +136,9 @@
 DEFINE_MUTEX(tty_mutex);
 EXPORT_SYMBOL(tty_mutex);
 
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
 ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@
 	kfree(tty);
 }
 
+static inline struct tty_struct *file_tty(struct file *file)
+{
+	return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+	struct tty_file_private *priv;
+
+	/* XXX: must implement proper error handling in callers */
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+	priv->tty = tty;
+	priv->file = file;
+	file->private_data = priv;
+
+	spin_lock(&tty_files_lock);
+	list_add(&priv->list, &tty->tty_files);
+	spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	spin_lock(&tty_files_lock);
+	list_del(&priv->list);
+	spin_unlock(&tty_files_lock);
+	file->private_data = NULL;
+	kfree(priv);
+}
+
+
 #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
 
 /**
@@ -235,11 +273,11 @@
 	struct list_head *p;
 	int count = 0;
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	list_for_each(p, &tty->tty_files) {
 		count++;
 	}
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_SLAVE &&
 	    tty->link && tty->link->count)
@@ -497,6 +535,7 @@
 	struct file *cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
+	struct tty_file_private *priv;
 	int    closecount = 0, n;
 	unsigned long flags;
 	int refs = 0;
@@ -506,7 +545,7 @@
 
 
 	spin_lock(&redirect_lock);
-	if (redirect && redirect->private_data == tty) {
+	if (redirect && file_tty(redirect) == tty) {
 		f = redirect;
 		redirect = NULL;
 	}
@@ -519,9 +558,10 @@
 	   workqueue with the lock held */
 	check_tty_count(tty, "tty_hangup");
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	/* This breaks for file handles being sent over AF_UNIX sockets ? */
-	list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+	list_for_each_entry(priv, &tty->tty_files, list) {
+		filp = priv->file;
 		if (filp->f_op->write == redirected_tty_write)
 			cons_filp = filp;
 		if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@
 		__tty_fasync(-1, filp, 0);	/* can't block */
 		filp->f_op = &hung_up_tty_fops;
 	}
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 
 	tty_ldisc_hangup(tty);
 
@@ -889,12 +929,10 @@
 			loff_t *ppos)
 {
 	int i;
-	struct tty_struct *tty;
-	struct inode *inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct tty_struct *tty = file_tty(file);
 	struct tty_ldisc *ld;
 
-	tty = file->private_data;
-	inode = file->f_path.dentry->d_inode;
 	if (tty_paranoia_check(tty, inode, "tty_read"))
 		return -EIO;
 	if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@
 static ssize_t tty_write(struct file *file, const char __user *buf,
 						size_t count, loff_t *ppos)
 {
-	struct tty_struct *tty;
 	struct inode *inode = file->f_path.dentry->d_inode;
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
 	ssize_t ret;
-	struct tty_ldisc *ld;
 
-	tty = file->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_write"))
 		return -EIO;
 	if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@
 	tty_driver_kref_put(driver);
 	module_put(driver->owner);
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	list_del_init(&tty->tty_files);
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
@@ -1507,13 +1544,13 @@
 
 int tty_release(struct inode *inode, struct file *filp)
 {
-	struct tty_struct *tty, *o_tty;
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_struct *o_tty;
 	int	pty_master, tty_closing, o_tty_closing, do_sleep;
 	int	devpts;
 	int	idx;
 	char	buf[64];
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_release_dev"))
 		return 0;
 
@@ -1671,8 +1708,7 @@
 	 *  - do_tty_hangup no longer sees this file descriptor as
 	 *    something that needs to be handled for hangups.
 	 */
-	file_kill(filp);
-	filp->private_data = NULL;
+	tty_del_file(filp);
 
 	/*
 	 * Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@
 		return PTR_ERR(tty);
 	}
 
-	filp->private_data = tty;
-	file_move(filp, &tty->tty_files);
+	tty_add_file(tty, filp);
+
 	check_tty_count(tty, "tty_open");
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@
 
 static unsigned int tty_poll(struct file *filp, poll_table *wait)
 {
-	struct tty_struct *tty;
+	struct tty_struct *tty = file_tty(filp);
 	struct tty_ldisc *ld;
 	int ret = 0;
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
 		return 0;
 
@@ -1933,11 +1968,10 @@
 
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
-	struct tty_struct *tty;
+	struct tty_struct *tty = file_tty(filp);
 	unsigned long flags;
 	int retval = 0;
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
 		goto out;
 
@@ -2491,13 +2525,13 @@
  */
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct tty_struct *tty, *real_tty;
+	struct tty_struct *tty = file_tty(file);
+	struct tty_struct *real_tty;
 	void __user *p = (void __user *)arg;
 	int retval;
 	struct tty_ldisc *ld;
 	struct inode *inode = file->f_dentry->d_inode;
 
-	tty = file->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_ioctl"))
 		return -EINVAL;
 
@@ -2619,7 +2653,7 @@
 				unsigned long arg)
 {
 	struct inode *inode = file->f_dentry->d_inode;
-	struct tty_struct *tty = file->private_data;
+	struct tty_struct *tty = file_tty(file);
 	struct tty_ldisc *ld;
 	int retval = -ENOIOCTLCMD;
 
@@ -2711,7 +2745,7 @@
 				if (!filp)
 					continue;
 				if (filp->f_op->read == tty_read &&
-				    filp->private_data == tty) {
+				    file_tty(filp) == tty) {
 					printk(KERN_NOTICE "SAK: killed process %d"
 					    " (%s): fd#%d opened to the tty\n",
 					    task_pid_nr(p), p->comm, i);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c734f9b..50590c7 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -194,10 +194,11 @@
 int fg_console;
 int last_console;
 int want_console = -1;
-int saved_fg_console;
-int saved_last_console;
-int saved_want_console;
-int saved_vc_mode;
+static int saved_fg_console;
+static int saved_last_console;
+static int saved_want_console;
+static int saved_vc_mode;
+static int saved_console_blanked;
 
 /*
  * For each existing display, we have a pointer to console currently visible
@@ -3449,6 +3450,7 @@
 	saved_last_console = last_console;
 	saved_want_console = want_console;
 	saved_vc_mode = vc->vc_mode;
+	saved_console_blanked = console_blanked;
 	vc->vc_mode = KD_TEXT;
 	console_blanked = 0;
 	if (vc->vc_sw->con_debug_enter)
@@ -3492,6 +3494,7 @@
 	fg_console = saved_fg_console;
 	last_console = saved_last_console;
 	want_console = saved_want_console;
+	console_blanked = saved_console_blanked;
 	vc_cons[fg_console].d->vc_mode = saved_vc_mode;
 
 	vc = vc_cons[fg_console].d;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 0ed763c..b663d57 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,6 +94,7 @@
 
 #ifdef CONFIG_OF
 /* For open firmware. */
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 90288ec..84da748 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
 static int drm_version(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@
 	int retcode = -EINVAL;
 	char stack_kdata[128];
 	char *kdata = NULL;
+	unsigned int usize, asize;
 
 	dev = file_priv->minor->dev;
 	atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@
 	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
 		goto err_i1;
 	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
-	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		u32 drv_size;
 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		drv_size = _IOC_SIZE(ioctl->cmd_drv);
+		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
 		ioctl = &drm_ioctls[nr];
 		cmd = ioctl->cmd;
+		usize = asize = _IOC_SIZE(cmd);
 	} else
 		goto err_i1;
 
@@ -460,10 +471,10 @@
 		retcode = -EACCES;
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
-			if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+			if (asize <= sizeof(stack_kdata)) {
 				kdata = stack_kdata;
 			} else {
-				kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+				kdata = kmalloc(asize, GFP_KERNEL);
 				if (!kdata) {
 					retcode = -ENOMEM;
 					goto err_i1;
@@ -473,11 +484,13 @@
 
 		if (cmd & IOC_IN) {
 			if (copy_from_user(kdata, (void __user *)arg,
-					   _IOC_SIZE(cmd)) != 0) {
+					   usize) != 0) {
 				retcode = -EFAULT;
 				goto err_i1;
 			}
-		}
+		} else
+			memset(kdata, 0, usize);
+
 		if (ioctl->flags & DRM_UNLOCKED)
 			retcode = func(dev, kdata, file_priv);
 		else {
@@ -488,7 +501,7 @@
 
 		if (cmd & IOC_OUT) {
 			if (copy_to_user((void __user *)arg, kdata,
-					 _IOC_SIZE(cmd)) != 0)
+					 usize) != 0)
 				retcode = -EFAULT;
 		}
 	}
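
The driver ioctl tables below switch from DRM_IOCTL_DEF() to
DRM_IOCTL_DEF_DRV(), whose definition lives in drmP.h and is not part of this
excerpt; based on the core macro above and the new cmd_drv/usize handling, it
is assumed to look roughly like:

/* Assumed definition: like DRM_IOCTL_DEF(), but it also records the full
 * driver ioctl number in .cmd_drv so drm_ioctl() can derive the driver's
 * declared argument size (drv_size) for the kernel copy buffer.
 */
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)				\
	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl,		\
				       .func = _func,			\
				       .flags = _flags,			\
				       .cmd_drv = DRM_IOCTL_##ioctl}

This is also why the driver tables drop the DRM_ prefix from the first
argument: the macro pastes it back when building .cmd and .cmd_drv.
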
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index de82e20..8dd7e6f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
 	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_connector *connector = fb_helper_conn->connector;
+	struct drm_connector *connector;
 
 	if (!fb_helper_conn)
 		return false;
+	connector = fb_helper_conn->connector;
 
 	cmdline_mode = &fb_helper_conn->cmdline_mode;
 	if (!mode_option)
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 3778360..fda6746 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@
 				break;
 		}
 
-		if (!agpmem)
+		if (&agpmem->head == &dev->agp->memory)
 			goto vm_fault_error;
 
 		/*
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 0e6c131..61b4caf 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1255,21 +1255,21 @@
 }
 
 struct drm_ioctl_desc i810_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 5168862..671aa18 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -1524,20 +1524,20 @@
 }
 
 struct drm_ioctl_desc i830_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c..5c8e534 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,6 +8,7 @@
           i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
+	  i915_gem_evict.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
@@ -18,6 +19,7 @@
 	  intel_hdmi.o \
 	  intel_sdvo.o \
 	  intel_modes.o \
+	  intel_panel.o \
 	  intel_i2c.o \
 	  intel_fb.o \
 	  intel_tv.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0d6ff64..8c2ad01 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -30,20 +30,17 @@
 #include "intel_drv.h"
 
 struct intel_dvo_device {
-	char *name;
+	const char *name;
 	int type;
 	/* DVOA/B/C output register */
 	u32 dvo_reg;
 	/* GPIO register used for i2c bus to control this device */
 	u32 gpio;
 	int slave_addr;
-	struct i2c_adapter *i2c_bus;
 
 	const struct intel_dvo_dev_ops *dev_ops;
 	void *dev_priv;
-
-	struct drm_display_mode *panel_fixed_mode;
-	bool panel_wants_dither;
+	struct i2c_adapter *i2c_bus;
 };
 
 struct intel_dvo_dev_ops {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9214119..92d5605 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -467,6 +467,9 @@
 		}
 	}
 
+	if (error->overlay)
+		intel_overlay_print_error_state(m, error->overlay);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f19ffe8..a7ec93e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -499,6 +499,13 @@
 		}
 	}
 
+
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		BEGIN_LP_RING(2);
+		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+		OUT_RING(MI_NOOP);
+		ADVANCE_LP_RING();
+	}
 	i915_emit_breadcrumb(dev);
 
 	return 0;
@@ -2360,46 +2367,46 @@
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
-	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5044f65..00befce 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -181,6 +181,7 @@
 	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
 	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
 	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 906663b..047cd7c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -113,6 +113,9 @@
 	int enabled;
 };
 
+struct intel_overlay;
+struct intel_overlay_error_state;
+
 struct drm_i915_master_private {
 	drm_local_map_t *sarea;
 	struct _drm_i915_sarea *sarea_priv;
@@ -166,6 +169,7 @@
 		u32 purgeable:1;
 	} *active_bo;
 	u32 active_bo_count;
+	struct intel_overlay_error_state *overlay;
 };
 
 struct drm_i915_display_funcs {
@@ -186,8 +190,6 @@
 	/* clock gating init */
 };
 
-struct intel_overlay;
-
 struct intel_device_info {
 	u8 is_mobile : 1;
 	u8 is_i8xx : 1;
@@ -242,6 +244,7 @@
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
+	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	void *seqno_page;
@@ -251,6 +254,7 @@
 	drm_local_map_t hws_map;
 	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
+	struct drm_gem_object *renderctx;
 
 	struct resource mch_res;
 
@@ -285,6 +289,9 @@
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 	int num_pipe;
+	u32 flush_rings;
+#define FLUSH_RENDER_RING	0x1
+#define FLUSH_BSD_RING		0x2
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -568,8 +575,6 @@
 		 */
 		struct delayed_work retire_work;
 
-		uint32_t next_gem_seqno;
-
 		/**
 		 * Waiting sequence number, if any
 		 */
@@ -610,6 +615,8 @@
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
+	/* Panel fitter placement and size for Ironlake+ */
+	u32 pch_pf_pos, pch_pf_size;
 
 	struct drm_crtc *plane_to_crtc_mapping[2];
 	struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -669,6 +676,8 @@
 	struct list_head list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
+	/** This object's place on eviction list */
+	struct list_head evict_list;
 
 	/**
 	 * This is set if the object is on the active or flushing lists
@@ -978,6 +987,7 @@
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
 		struct drm_file *file_priv,
@@ -991,7 +1001,9 @@
 				      int write);
 int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_gem_object *obj, int id);
+				struct drm_gem_object *obj,
+				int id,
+				int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
@@ -1003,6 +1015,11 @@
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1066,6 +1083,10 @@
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
@@ -1092,26 +1113,26 @@
 #define I915_VERBOSE 0
 
 #define BEGIN_LP_RING(n)  do { \
-	drm_i915_private_t *dev_priv = dev->dev_private;                \
+	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
-	intel_ring_begin(dev, &dev_priv->render_ring, (n));		\
+	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
 } while (0)
 
 
 #define OUT_RING(x) do {						\
-	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
-	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
+	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {						\
-	drm_i915_private_t *dev_priv = dev->dev_private;                \
+	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
-				dev_priv->render_ring.tail);		\
-	intel_ring_advance(dev, &dev_priv->render_ring);		\
+				dev_priv__->render_ring.tail);		\
+	intel_ring_advance(dev, &dev_priv__->render_ring);		\
 } while(0)
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0758c78..df5a713 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
+static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -48,8 +49,6 @@
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
@@ -58,6 +57,14 @@
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+{
+	return obj_priv->gtt_space &&
+		!obj_priv->active &&
+		obj_priv->pin_count == 0;
+}
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
 {
@@ -313,7 +320,8 @@
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
 
-		ret = i915_gem_evict_something(dev, obj->size);
+		ret = i915_gem_evict_something(dev, obj->size,
+					       i915_gem_get_gtt_alignment(obj));
 		if (ret)
 			return ret;
 
@@ -1036,6 +1044,11 @@
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+
+	/* Maintain LRU order of "inactive" objects */
+	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1137,7 +1150,7 @@
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
@@ -1155,8 +1168,6 @@
 		if (ret)
 			goto unlock;
 
-		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
 		ret = i915_gem_object_set_to_gtt_domain(obj, write);
 		if (ret)
 			goto unlock;
@@ -1169,6 +1180,9 @@
 			goto unlock;
 	}
 
+	if (i915_gem_object_is_inactive(obj_priv))
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
@@ -1363,7 +1377,6 @@
 			struct drm_file *file_priv)
 {
 	struct drm_i915_gem_mmap_gtt *args = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
@@ -1409,7 +1422,6 @@
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
-		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 	}
 
 	drm_gem_object_unreference(obj);
@@ -1493,9 +1505,16 @@
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct inode *inode;
 
+	/* Our goal here is to return as much memory as possible back to
+	 * the system, since we are called from the OOM path. To do this
+	 * we must instruct the shmfs to drop all of its backing pages,
+	 * *now*. Here we mirror the actions taken by
+	 * shmem_delete_inode() to release the backing store.
+	 */
 	inode = obj->filp->f_path.dentry->d_inode;
-	if (inode->i_op->truncate)
-		inode->i_op->truncate (inode);
+	truncate_inode_pages(inode->i_mapping, 0);
+	if (inode->i_op->truncate_range)
+		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
 
 	obj_priv->madv = __I915_MADV_PURGED;
 }
@@ -1887,19 +1906,6 @@
 				flush_domains);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains,
-	       struct intel_ring_buffer *ring)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-	ring->flush(dev, ring,
-			invalidate_domains,
-			flush_domains);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1973,8 +1979,6 @@
 	 * cause memory corruption through use-after-free.
 	 */
 
-	BUG_ON(obj_priv->active);
-
 	/* release the fence reg _after_ flushing */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
@@ -2010,34 +2014,7 @@
 	return ret;
 }
 
-static struct drm_gem_object *
-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *best = NULL;
-	struct drm_gem_object *first = NULL;
-
-	/* Try to find the smallest clean object */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-		if (obj->size >= min_size) {
-			if ((!obj_priv->dirty ||
-			     i915_gem_object_is_purgeable(obj_priv)) &&
-			    (!best || obj->size < best->size)) {
-				best = obj;
-				if (best->size == min_size)
-					return best;
-			}
-			if (!first)
-			    first = obj;
-		}
-	}
-
-	return best ? best : first;
-}
-
-static int
+int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2078,155 +2055,6 @@
 	return ret;
 }
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
-	bool lists_empty;
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
-	if (lists_empty)
-		return -ENOSPC;
-
-	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
-	ret = i915_gem_evict_from_inactive_list(dev);
-	if (ret)
-		return ret;
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!lists_empty);
-
-	return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev, int min_size)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	int ret;
-
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-	for (;;) {
-		i915_gem_retire_requests(dev);
-
-		/* If there's an inactive buffer available now, grab it
-		 * and be done.
-		 */
-		obj = i915_gem_find_inactive_object(dev, min_size);
-		if (obj) {
-			struct drm_i915_gem_object *obj_priv;
-
-#if WATCH_LRU
-			DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-			obj_priv = to_intel_bo(obj);
-			BUG_ON(obj_priv->pin_count != 0);
-			BUG_ON(obj_priv->active);
-
-			/* Wait on the rendering and unbind the buffer. */
-			return i915_gem_object_unbind(obj);
-		}
-
-		/* If we didn't get anything, but the ring is still processing
-		 * things, wait for the next to finish and hopefully leave us
-		 * a buffer to evict.
-		 */
-		if (!list_empty(&render_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&render_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_wait_request(dev,
-					request->seqno, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&bsd_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_wait_request(dev,
-					request->seqno, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		/* If we didn't have anything on the request list but there
-		 * are buffers awaiting a flush, emit one and try again.
-		 * When we wait on it, those buffers waiting for that flush
-		 * will get moved to inactive.
-		 */
-		if (!list_empty(&dev_priv->mm.flushing_list)) {
-			struct drm_i915_gem_object *obj_priv;
-
-			/* Find an object that we can immediately reuse */
-			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-				obj = &obj_priv->base;
-				if (obj->size >= min_size)
-					break;
-
-				obj = NULL;
-			}
-
-			if (obj != NULL) {
-				uint32_t seqno;
-
-				i915_gem_flush_ring(dev,
-					       obj->write_domain,
-					       obj->write_domain,
-					       obj_priv->ring);
-				seqno = i915_add_request(dev, NULL,
-						obj->write_domain,
-						obj_priv->ring);
-				if (seqno == 0)
-					return -ENOMEM;
-				continue;
-			}
-		}
-
-		/* If we didn't do any of the above, there's no single buffer
-		 * large enough to swap out for the new one, so just evict
-		 * everything and start again. (This should be rare.)
-		 */
-		if (!list_empty (&dev_priv->mm.inactive_list))
-			return i915_gem_evict_from_inactive_list(dev);
-		else
-			return i915_gem_evict_everything(dev);
-	}
-}
-
 int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
@@ -2666,7 +2494,7 @@
 #if WATCH_LRU
 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		ret = i915_gem_evict_something(dev, obj->size);
+		ret = i915_gem_evict_something(dev, obj->size, alignment);
 		if (ret)
 			return ret;
 
@@ -2684,7 +2512,8 @@
 
 		if (ret == -ENOMEM) {
 			/* first try to clear up some space from the GTT */
-			ret = i915_gem_evict_something(dev, obj->size);
+			ret = i915_gem_evict_something(dev, obj->size,
+						       alignment);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2714,7 +2543,7 @@
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, obj->size);
+		ret = i915_gem_evict_something(dev, obj->size, alignment);
 		if (ret)
 			return ret;
 
@@ -2723,6 +2552,9 @@
 	atomic_inc(&dev->gtt_count);
 	atomic_add(obj->size, &dev->gtt_memory);
 
+	/* keep track of bound objects by adding them to the inactive list */
+	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
@@ -3117,6 +2949,7 @@
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device		*dev = obj->dev;
+	drm_i915_private_t		*dev_priv = dev->dev_private;
 	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
 	uint32_t			invalidate_domains = 0;
 	uint32_t			flush_domains = 0;
@@ -3179,6 +3012,13 @@
 		obj->pending_write_domain = obj->write_domain;
 	obj->read_domains = obj->pending_read_domains;
 
+	if (flush_domains & I915_GEM_GPU_DOMAINS) {
+		if (obj_priv->ring == &dev_priv->render_ring)
+			dev_priv->flush_rings |= FLUSH_RENDER_RING;
+		else if (obj_priv->ring == &dev_priv->bsd_ring)
+			dev_priv->flush_rings |= FLUSH_BSD_RING;
+	}
+
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
 #if WATCH_BUF
@@ -3718,7 +3558,6 @@
 		ring = &dev_priv->render_ring;
 	}
 
-
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
@@ -3892,6 +3731,7 @@
 	 */
 	dev->invalidate_domains = 0;
 	dev->flush_domains = 0;
+	dev_priv->flush_rings = 0;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -3912,16 +3752,14 @@
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
+		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
 			(void)i915_add_request(dev, file_priv,
-					dev->flush_domains,
-					&dev_priv->render_ring);
-
-			if (HAS_BSD(dev))
-				(void)i915_add_request(dev, file_priv,
-						dev->flush_domains,
-						&dev_priv->bsd_ring);
-		}
+					       dev->flush_domains,
+					       &dev_priv->render_ring);
+		if (dev_priv->flush_rings & FLUSH_BSD_RING)
+			(void)i915_add_request(dev, file_priv,
+					       dev->flush_domains,
+					       &dev_priv->bsd_ring);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -4192,6 +4030,10 @@
 		if (alignment == 0)
 			alignment = i915_gem_get_gtt_alignment(obj);
 		if (obj_priv->gtt_offset & (alignment - 1)) {
+			WARN(obj_priv->pin_count,
+			     "bo is already pinned with incorrect alignment:"
+			     " offset=%x, req.alignment=%x\n",
+			     obj_priv->gtt_offset, alignment);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
 				return ret;
@@ -4213,8 +4055,7 @@
 		atomic_inc(&dev->pin_count);
 		atomic_add(obj->size, &dev->pin_memory);
 		if (!obj_priv->active &&
-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
-		    !list_empty(&obj_priv->list))
+		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 			list_del_init(&obj_priv->list);
 	}
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4359,22 +4200,34 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	/* Update the active list for the hardware's current position.
-	 * Otherwise this only updates on a delayed timer or when irqs are
-	 * actually unmasked, and our working set ends up being larger than
-	 * required.
-	 */
-	i915_gem_retire_requests(dev);
 
-	obj_priv = to_intel_bo(obj);
-	/* Don't count being on the flushing list against the object being
-	 * done.  Otherwise, a buffer left on the flushing list but not getting
-	 * flushed (because nobody's flushing that domain) won't ever return
-	 * unbusy and get reused by libdrm's bo cache.  The other expected
-	 * consumer of this interface, OpenGL's occlusion queries, also specs
-	 * that the objects get unbusy "eventually" without any interference.
+	/* Count all active objects as busy, even if they are currently not used
+	 * by the gpu. Users of this interface expect objects to eventually
+	 * become non-busy without any further actions, therefore emit any
+	 * necessary flushes here.
 	 */
-	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+	obj_priv = to_intel_bo(obj);
+	args->busy = obj_priv->active;
+	if (args->busy) {
+		/* Unconditionally flush objects, even when the gpu still uses this
+		 * object. Userspace calling this function indicates that it wants to
+		 * use this buffer rather sooner than later, so issuing the required
+		 * flush earlier is beneficial.
+		 */
+		if (obj->write_domain) {
+			i915_gem_flush(dev, 0, obj->write_domain);
+			(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+		}
+
+		/* Update the active list for the hardware's current position.
+		 * Otherwise this only updates on a delayed timer or when irqs
+		 * are actually unmasked, and our working set ends up being
+		 * larger than required.
+		 */
+		i915_gem_retire_requests_ring(dev, obj_priv->ring);
+
+		args->busy = obj_priv->active;
+	}
 
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -4514,30 +4367,6 @@
 	i915_gem_free_object_tail(obj);
 }
 
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
-		struct drm_gem_object *obj;
-		int ret;
-
-		obj = &list_first_entry(&dev_priv->mm.inactive_list,
-					struct drm_i915_gem_object,
-					list)->base;
-
-		ret = i915_gem_object_unbind(obj);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object: %d\n", ret);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4562,7 +4391,7 @@
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_from_inactive_list(dev);
+		ret = i915_gem_evict_inactive(dev);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
@@ -4680,6 +4509,8 @@
 			goto cleanup_render_ring;
 	}
 
+	dev_priv->next_seqno = 1;
+
 	return 0;
 
 cleanup_render_ring:
@@ -4841,7 +4672,7 @@
  * e.g. for cursor + overlay regs
  */
 int i915_gem_init_phys_object(struct drm_device *dev,
-			      int id, int size)
+			      int id, int size, int align)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_phys_object *phys_obj;
@@ -4856,7 +4687,7 @@
 
 	phys_obj->id = id;
 
-	phys_obj->handle = drm_pci_alloc(dev, size, 0);
+	phys_obj->handle = drm_pci_alloc(dev, size, align);
 	if (!phys_obj->handle) {
 		ret = -ENOMEM;
 		goto kfree_obj;
@@ -4938,7 +4769,9 @@
 
 int
 i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_gem_object *obj, int id)
+			    struct drm_gem_object *obj,
+			    int id,
+			    int align)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
@@ -4957,11 +4790,10 @@
 		i915_gem_detach_phys_object(dev, obj);
 	}
 
-
 	/* create a new object */
 	if (!dev_priv->mm.phys_objs[id - 1]) {
 		ret = i915_gem_init_phys_object(dev, id,
-						obj->size);
+						obj->size, align);
 		if (ret) {
 			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
 			goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 0000000..72cae3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
+static struct drm_i915_gem_object *
+i915_gem_next_active_object(struct drm_device *dev,
+			    struct list_head **render_iter,
+			    struct list_head **bsd_iter)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+
+	if (*render_iter != &dev_priv->render_ring.active_list)
+		render_obj = list_entry(*render_iter,
+					struct drm_i915_gem_object,
+					list);
+
+	if (HAS_BSD(dev)) {
+		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+			bsd_obj = list_entry(*bsd_iter,
+					     struct drm_i915_gem_object,
+					     list);
+
+		if (render_obj == NULL) {
+			*bsd_iter = (*bsd_iter)->next;
+			return bsd_obj;
+		}
+
+		if (bsd_obj == NULL) {
+			*render_iter = (*render_iter)->next;
+			return render_obj;
+		}
+
+		/* XXX can we handle seqno wrapping? */
+		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+			*render_iter = (*render_iter)->next;
+			return render_obj;
+		} else {
+			*bsd_iter = (*bsd_iter)->next;
+			return bsd_obj;
+		}
+	} else {
+		*render_iter = (*render_iter)->next;
+		return render_obj;
+	}
+}
+
+static bool
+mark_free(struct drm_i915_gem_object *obj_priv,
+	   struct list_head *unwind)
+{
+	list_add(&obj_priv->evict_list, unwind);
+	return drm_mm_scan_add_block(obj_priv->gtt_space);
+}
+
+#define i915_for_each_active_object(OBJ, R, B) \
+	*(R) = dev_priv->render_ring.active_list.next; \
+	*(B) = dev_priv->bsd_ring.active_list.next; \
+	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct list_head eviction_list, unwind_list;
+	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+	struct list_head *render_iter, *bsd_iter;
+	int ret = 0;
+
+	i915_gem_retire_requests(dev);
+
+	/* Re-check for free space after retiring requests */
+	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+			       min_size, alignment, 0))
+		return 0;
+
+	/*
+	 * The goal is to evict objects and amalgamate space in LRU order.
+	 * The oldest idle objects reside on the inactive list, which is in
+	 * retirement order. The next objects to retire are those on the (per
+	 * ring) active list that do not have an outstanding flush. Once the
+	 * hardware reports completion (the seqno is updated after the
+	 * batchbuffer has been finished) the clean buffer objects would
+	 * be retired to the inactive list. Any dirty objects would be added
+	 * to the tail of the flushing list. So after processing the clean
+	 * active objects we need to emit a MI_FLUSH to retire the flushing
+	 * list, hence the retirement order of the flushing list is in
+	 * advance of the dirty objects on the active lists.
+	 *
+	 * The retirement sequence is thus:
+	 *   1. Inactive objects (already retired)
+	 *   2. Clean active objects
+	 *   3. Flushing list
+	 *   4. Dirty active objects.
+	 *
+	 * On each list, the oldest objects lie at the HEAD with the freshest
+	 * object on the TAIL.
+	 */
+
+	INIT_LIST_HEAD(&unwind_list);
+	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+	/* First see if there is a large enough contiguous idle region... */
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Now merge in the soon-to-be-expired objects... */
+	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+		/* Does the object require an outstanding flush? */
+		if (obj_priv->base.write_domain || obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Finally add anything with a pending flush (in order of retirement) */
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+		if (obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+		if (!obj_priv->base.write_domain || obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Nothing found, clean up and bail out! */
+	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
+		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+		BUG_ON(ret);
+	}
+
+	/* We expect the caller to unpin, evict all and try again, or give up.
+	 * So calling i915_gem_evict_everything() is unnecessary.
+	 */
+	return -ENOSPC;
+
+found:
+	INIT_LIST_HEAD(&eviction_list);
+	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+				 &unwind_list, evict_list) {
+		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+			/* drm_mm doesn't allow any other operations while
+			 * scanning, therefore store to be evicted objects on a
+			 * temporary list. */
+			list_move(&obj_priv->evict_list, &eviction_list);
+		}
+	}
+
+	/* Unbinding will emit any required flushes */
+	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+				 &eviction_list, evict_list) {
+#if WATCH_LRU
+		DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
+#endif
+		ret = i915_gem_object_unbind(&obj_priv->base);
+		if (ret)
+			return ret;
+	}
+
+	/* The just created free hole should be on the top of the free stack
+	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
+	 * Furthermore all accessed data has just recently been used, so it
+	 * should be really fast, too. */
+	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
+				   alignment, 0));
+
+	return 0;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	bool lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty)
+		return -ENOSPC;
+
+	/* Flush everything (on to the inactive lists) and evict */
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+	ret = i915_gem_evict_inactive(dev);
+	if (ret)
+		return ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+	BUG_ON(!lists_empty);
+
+	return 0;
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	while (!list_empty(&dev_priv->mm.inactive_list)) {
+		struct drm_gem_object *obj;
+		int ret;
+
+		obj = &list_first_entry(&dev_priv->mm.inactive_list,
+					struct drm_i915_gem_object,
+					list)->base;
+
+		ret = i915_gem_object_unbind(obj);
+		if (ret != 0) {
+			DRM_ERROR("Error unbinding object: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
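
The i915_gem_evict_something() rewrite above works in two phases: candidate objects are speculatively handed to the drm_mm scanner in LRU order and remembered on an unwind list, and only once a large-enough hole has been found are the chosen blocks pulled back out of the scan and actually unbound. The fragment below is a minimal stand-alone model of that mark / unwind / commit pattern, not the drm_mm interface itself; the structure and function names (gtt_obj, scan_add, evict_something) are invented for illustration, and unlike drm_mm_scan_remove_block() the toy commit phase evicts every scanned object rather than only those overlapping the found hole.

#include <stdbool.h>
#include <stdio.h>

struct gtt_obj {
	unsigned long offset;	/* start of the object's block, kept in offset order */
	unsigned long size;
	bool scanned;		/* speculatively marked free during a scan */
	bool evicted;
};

/* Mark one candidate and report whether the speculatively freed space now
 * contains a contiguous hole of at least min_size (the analogue of
 * drm_mm_scan_add_block() returning true). */
static bool scan_add(struct gtt_obj *objs, int n, int idx, unsigned long min_size)
{
	unsigned long hole = 0;
	int i;

	objs[idx].scanned = true;

	for (i = 0; i < n; i++) {
		if (objs[i].scanned || objs[i].evicted)
			hole += objs[i].size;
		else
			hole = 0;
		if (hole >= min_size)
			return true;
	}
	return false;
}

static int evict_something(struct gtt_obj *objs, int n, unsigned long min_size)
{
	int i;

	/* Phase 1: scan candidates in eviction order. */
	for (i = 0; i < n; i++)
		if (scan_add(objs, n, i, min_size))
			goto found;

	/* Nothing found: unwind every speculative mark and report failure,
	 * leaving the caller to evict everything or give up. */
	for (i = 0; i < n; i++)
		objs[i].scanned = false;
	return -1;

found:
	/* Commit: only the marked objects are actually evicted. */
	for (i = 0; i < n; i++) {
		if (objs[i].scanned) {
			objs[i].scanned = false;
			objs[i].evicted = true;
		}
	}
	return 0;
}

int main(void)
{
	struct gtt_obj objs[] = {
		{ 0,     4096, false, false },
		{ 4096,  8192, false, false },
		{ 12288, 4096, false, false },
	};

	printf("evict_something(8192) -> %d\n", evict_something(objs, 3, 8192));
	return 0;
}
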
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85785a8..16861b8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -425,9 +425,11 @@
 i915_error_object_create(struct drm_device *dev,
 			 struct drm_gem_object *src)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
 	struct drm_i915_gem_object *src_priv;
 	int page, page_count;
+	u32 reloc_offset;
 
 	if (src == NULL)
 		return NULL;
@@ -442,18 +444,27 @@
 	if (dst == NULL)
 		return NULL;
 
+	reloc_offset = src_priv->gtt_offset;
 	for (page = 0; page < page_count; page++) {
-		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
 		unsigned long flags;
+		void __iomem *s;
+		void *d;
 
+		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
 		if (d == NULL)
 			goto unwind;
+
 		local_irq_save(flags);
-		s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
-		memcpy(d, s, PAGE_SIZE);
-		kunmap_atomic(s, KM_IRQ0);
+		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+					     reloc_offset,
+					     KM_IRQ0);
+		memcpy_fromio(d, s, PAGE_SIZE);
+		io_mapping_unmap_atomic(s, KM_IRQ0);
 		local_irq_restore(flags);
+
 		dst->pages[page] = d;
+
+		reloc_offset += PAGE_SIZE;
 	}
 	dst->page_count = page_count;
 	dst->gtt_offset = src_priv->gtt_offset;
@@ -489,6 +500,7 @@
 	i915_error_object_free(error->batchbuffer[1]);
 	i915_error_object_free(error->ringbuffer);
 	kfree(error->active_bo);
+	kfree(error->overlay);
 	kfree(error);
 }
 
@@ -612,18 +624,57 @@
 
 		if (batchbuffer[1] == NULL &&
 		    error->acthd >= obj_priv->gtt_offset &&
-		    error->acthd < obj_priv->gtt_offset + obj->size &&
-		    batchbuffer[0] != obj)
+		    error->acthd < obj_priv->gtt_offset + obj->size)
 			batchbuffer[1] = obj;
 
 		count++;
 	}
+	/* Scan the other lists for completeness for those bizarre errors. */
+	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = &obj_priv->base;
+
+			if (batchbuffer[0] == NULL &&
+			    bbaddr >= obj_priv->gtt_offset &&
+			    bbaddr < obj_priv->gtt_offset + obj->size)
+				batchbuffer[0] = obj;
+
+			if (batchbuffer[1] == NULL &&
+			    error->acthd >= obj_priv->gtt_offset &&
+			    error->acthd < obj_priv->gtt_offset + obj->size)
+				batchbuffer[1] = obj;
+
+			if (batchbuffer[0] && batchbuffer[1])
+				break;
+		}
+	}
+	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+			struct drm_gem_object *obj = &obj_priv->base;
+
+			if (batchbuffer[0] == NULL &&
+			    bbaddr >= obj_priv->gtt_offset &&
+			    bbaddr < obj_priv->gtt_offset + obj->size)
+				batchbuffer[0] = obj;
+
+			if (batchbuffer[1] == NULL &&
+			    error->acthd >= obj_priv->gtt_offset &&
+			    error->acthd < obj_priv->gtt_offset + obj->size)
+				batchbuffer[1] = obj;
+
+			if (batchbuffer[0] && batchbuffer[1])
+				break;
+		}
+	}
 
 	/* We need to copy these to an anonymous buffer as the simplest
 	 * method to avoid being overwritten by userspace.
 	 */
 	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+	if (batchbuffer[1] != batchbuffer[0])
+		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+	else
+		error->batchbuffer[1] = NULL;
 
 	/* Record the ringbuffer */
 	error->ringbuffer = i915_error_object_create(dev,
@@ -667,6 +718,8 @@
 
 	do_gettimeofday(&error->time);
 
+	error->overlay = intel_overlay_capture_error_state(dev);
+
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (dev_priv->first_error == NULL) {
 		dev_priv->first_error = error;
@@ -1251,6 +1304,16 @@
 				&dev_priv->render_ring),
 			i915_get_tail_request(dev)->seqno)) {
 		dev_priv->hangcheck_count = 0;
+
+		/* Issue a wake-up to catch stuck h/w. */
+		if (dev_priv->render_ring.waiting_gem_seqno |
+		    dev_priv->bsd_ring.waiting_gem_seqno) {
+			DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
+			if (dev_priv->render_ring.waiting_gem_seqno)
+				DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+			if (dev_priv->bsd_ring.waiting_gem_seqno)
+				DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+		}
 		return;
 	}
 
@@ -1318,12 +1381,17 @@
 	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
 	(void) I915_READ(DEIER);
 
-	/* user interrupt should be enabled, but masked initial */
+	/* Gen6 only needs render pipe_control now */
+	if (IS_GEN6(dev))
+		render_mask = GT_PIPE_NOTIFY;
+
 	dev_priv->gt_irq_mask_reg = ~render_mask;
 	dev_priv->gt_irq_enable_reg = render_mask;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	if (IS_GEN6(dev))
+		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
 	(void) I915_READ(GTIER);
 
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index d1bf92b..ea5d3fe 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,10 +114,6 @@
 #define ASLE_REQ_MSK           0xf
 
 /* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAIL    (2<<10)
-#define ASLE_BACKLIGHT_FAIL    (2<<12)
-#define ASLE_PFIT_FAIL         (2<<14)
-#define ASLE_PWM_FREQ_FAIL     (2<<16)
 #define ASLE_ALS_ILLUM_FAILED	(1<<10)
 #define ASLE_BACKLIGHT_FAILED	(1<<12)
 #define ASLE_PFIT_FAILED	(1<<14)
@@ -155,11 +151,11 @@
 	u32 max_backlight, level, shift;
 
 	if (!(bclp & ASLE_BCLP_VALID))
-		return ASLE_BACKLIGHT_FAIL;
+		return ASLE_BACKLIGHT_FAILED;
 
 	bclp &= ASLE_BCLP_MSK;
 	if (bclp < 0 || bclp > 255)
-		return ASLE_BACKLIGHT_FAIL;
+		return ASLE_BACKLIGHT_FAILED;
 
 	blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
 	blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -211,7 +207,7 @@
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
 	if (!(pfit & ASLE_PFIT_VALID))
-		return ASLE_PFIT_FAIL;
+		return ASLE_PFIT_FAILED;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 281db6e..67e3ec1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -170,6 +170,7 @@
 #define   MI_NO_WRITE_FLUSH	(1 << 2)
 #define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
@@ -180,6 +181,12 @@
 #define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
 #define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
 #define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
+#define   MI_MM_SPACE_GTT		(1<<8)
+#define   MI_MM_SPACE_PHYSICAL		(0<<8)
+#define   MI_SAVE_EXT_STATE_EN		(1<<3)
+#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
@@ -1100,6 +1107,11 @@
 #define PEG_BAND_GAP_DATA	0x14d68
 
 /*
+ * Logical Context regs
+ */
+#define CCID			0x2180
+#define   CCID_EN		(1<<0)
+/*
  * Overlay regs
  */
 
@@ -2069,6 +2081,7 @@
 #define PIPE_DITHER_TYPE_ST01		(1 << 2)
 /* Pipe A */
 #define PIPEADSL		0x70000
+#define   DSL_LINEMASK	       	0x00000fff
 #define PIPEACONF		0x70008
 #define   PIPEACONF_ENABLE	(1<<31)
 #define   PIPEACONF_DISABLE	0
@@ -2928,6 +2941,7 @@
 #define  TRANS_DP_VSYNC_ACTIVE_LOW	0
 #define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
 #define  TRANS_DP_HSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_SYNC_MASK	(3<<3)
 
 /* SNB eDP training params */
 /* SNB A-stepping */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6e20252..2c6b98f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32	dpll_reg;
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
 	} else {
 		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -75,7 +75,7 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -239,7 +239,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
 	}
@@ -247,7 +247,7 @@
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
 		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
 		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@
 		dev_priv->saveFPA1 = I915_READ(FPA1);
 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
 	}
-	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@
 	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
 	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
 	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	if (!IS_IRONLAKE(dev))
+	if (!HAS_PCH_SPLIT(dev))
 		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
 		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
 		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@
 	/* Pipe & plane B info */
 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
 	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
 		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
 		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@
 		dev_priv->saveFPB1 = I915_READ(FPB1);
 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
 	}
-	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@
 	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
 	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
 	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	if (!IS_IRONLAKE(dev))
+	if (!HAS_PCH_SPLIT(dev))
 		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
 		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
 		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
 		fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@
 		fpb1_reg = FPB1;
 	}
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
 		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
 	}
@@ -395,16 +395,20 @@
 	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
 		I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
 			   ~DPLL_VCO_ENABLE);
-		DRM_UDELAY(150);
+		POSTING_READ(dpll_a_reg);
+		udelay(150);
 	}
 	I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
 	I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
 	/* Actually enable it */
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
-	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+	POSTING_READ(dpll_a_reg);
+	udelay(150);
+	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-	DRM_UDELAY(150);
+		POSTING_READ(DPLL_A_MD);
+	}
+	udelay(150);
 
 	/* Restore mode */
 	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -413,10 +417,10 @@
 	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
 	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
 	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	if (!IS_IRONLAKE(dev))
+	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
 		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
 		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -460,16 +464,20 @@
 	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
 		I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
 			   ~DPLL_VCO_ENABLE);
-		DRM_UDELAY(150);
+		POSTING_READ(dpll_b_reg);
+		udelay(150);
 	}
 	I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
 	I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
 	/* Actually enable it */
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
-	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+	POSTING_READ(dpll_b_reg);
+	udelay(150);
+	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-	DRM_UDELAY(150);
+		POSTING_READ(DPLL_B_MD);
+	}
+	udelay(150);
 
 	/* Restore mode */
 	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -478,10 +486,10 @@
 	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
 	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
 	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	if (!IS_IRONLAKE(dev))
+	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
 		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
 		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +554,14 @@
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
 	/* CRT state */
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
 	} else {
 		dev_priv->saveADPA = I915_READ(ADPA);
 	}
 
 	/* LVDS state */
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +579,10 @@
 			dev_priv->saveLVDS = I915_READ(LVDS);
 	}
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -602,7 +610,7 @@
 
 	/* Only save FBC state on the platform that supports FBC */
 	if (I915_HAS_FBC(dev)) {
-		if (IS_IRONLAKE_M(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
 			dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
@@ -618,7 +626,7 @@
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
 	else
 		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -660,24 +668,24 @@
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
 	/* CRT state */
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 	else
 		I915_WRITE(ADPA, dev_priv->saveADPA);
 
 	/* LVDS state */
-	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
 		I915_WRITE(LVDS, dev_priv->saveLVDS);
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
 
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
 		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -708,7 +716,7 @@
 
 	/* only restore FBC info on the platform that supports FBC*/
 	if (I915_HAS_FBC(dev)) {
-		if (IS_IRONLAKE_M(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
 			ironlake_disable_fbc(dev);
 			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
@@ -723,14 +731,15 @@
 		}
 	}
 	/* VGA state */
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
 	I915_WRITE(VGA0, dev_priv->saveVGA0);
 	I915_WRITE(VGA1, dev_priv->saveVGA1);
 	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
-	DRM_UDELAY(150);
+	POSTING_READ(VGA_PD);
+	udelay(150);
 
 	i915_restore_vga(dev);
 }
@@ -748,7 +757,7 @@
 	i915_save_display(dev);
 
 	/* Interrupt state */
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveDEIER = I915_READ(DEIER);
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
@@ -762,7 +771,7 @@
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	if (IS_IRONLAKE_M(dev))
+	if (HAS_PCH_SPLIT(dev))
 		ironlake_disable_drps(dev);
 
 	/* Cache mode state */
@@ -820,7 +829,7 @@
 	i915_restore_display(dev);
 
 	/* Interrupt state */
-	if (IS_IRONLAKE(dev)) {
+	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(DEIER, dev_priv->saveDEIER);
 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 		I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -835,7 +844,7 @@
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (IS_IRONLAKE_M(dev))
+	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_drps(dev);
 
 	/* Cache mode state */
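
Several of the suspend/resume hunks above swap a bare DRM_UDELAY() after a DPLL or VGA register write for a POSTING_READ() of that register followed by udelay(), so the settling delay only starts once the posted MMIO write has actually reached the hardware. The snippet below is a rough user-space analogue of that write / read-back / delay ordering against a fake register block; fake_mmio and REG_DPLL are purely illustrative.

#include <stdint.h>
#include <unistd.h>

static volatile uint32_t fake_mmio[0x100];	/* stand-in for a mapped MMIO BAR */
#define REG_DPLL 0x14

static void reg_write(uint32_t reg, uint32_t val)
{
	fake_mmio[reg / 4] = val;
}

static uint32_t reg_read(uint32_t reg)
{
	return fake_mmio[reg / 4];
}

int main(void)
{
	reg_write(REG_DPLL, 1u << 31);
	/* Read the same register back so the (possibly posted) write is known
	 * to have landed before the settling delay begins. */
	(void)reg_read(REG_DPLL);
	usleep(150);
	return 0;
}
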
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ee0732b..4b77351 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -160,19 +160,20 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 adpa, temp;
 	bool ret;
+	bool turn_off_dac = false;
 
 	temp = adpa = I915_READ(PCH_ADPA);
 
-	if (HAS_PCH_CPT(dev)) {
-		/* Disable DAC before force detect */
-		I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
-		(void)I915_READ(PCH_ADPA);
-	} else {
-		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-		/* disable HPD first */
-		I915_WRITE(PCH_ADPA, adpa);
-		(void)I915_READ(PCH_ADPA);
-	}
+	if (HAS_PCH_SPLIT(dev))
+		turn_off_dac = true;
+
+	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	if (turn_off_dac)
+		adpa &= ~ADPA_DAC_ENABLE;
+
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 			ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -185,10 +186,11 @@
 	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
 	I915_WRITE(PCH_ADPA, adpa);
 
-	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
-		;
+	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+		     1000, 1))
+		DRM_ERROR("timed out waiting for FORCE_TRIGGER");
 
-	if (HAS_PCH_CPT(dev)) {
+	if (turn_off_dac) {
 		I915_WRITE(PCH_ADPA, temp);
 		(void)I915_READ(PCH_ADPA);
 	}
@@ -237,17 +239,13 @@
 	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
 	for (i = 0; i < tries ; i++) {
-		unsigned long timeout;
 		/* turn on the FORCE_DETECT */
 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-		timeout = jiffies + msecs_to_jiffies(1000);
 		/* wait for FORCE_DETECT to go off */
-		do {
-			if (!(I915_READ(PORT_HOTPLUG_EN) &
-					CRT_HOTPLUG_FORCE_DETECT))
-				break;
-			msleep(1);
-		} while (time_after(timeout, jiffies));
+		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+			      CRT_HOTPLUG_FORCE_DETECT) == 0,
+			     1000, 1))
+			DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
 	}
 
 	stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -331,7 +329,7 @@
 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 		/* Wait for next Vblank to substitute
 		 * border color for Color info */
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, pipe);
 		st00 = I915_READ8(VGA_MSR_WRITE);
 		status = ((st00 & (1 << 4)) != 0) ?
 			connector_status_connected :
@@ -508,17 +506,8 @@
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	intel_i2c_destroy(intel_encoder->ddc_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
-	.destroy = intel_crt_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 void intel_crt_init(struct drm_device *dev)
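
The CRT hotplug rework above, and many of the display changes that follow, replace open-coded "spin until a status bit changes" loops with a bounded wait_for(COND, timeout, sleep) check that logs an error on timeout. The helper below is a minimal user-space sketch of such a macro; the argument order is inferred from the call sites in this diff (condition, timeout in milliseconds, then a flag that appears to be 0 for busy-polling and non-zero to sleep between polls), clock_ms() is an invented helper, and the driver's actual macro lives in its headers and is not shown here.

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static unsigned long clock_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Poll COND until it is true or 'ms' milliseconds pass; returns 0 on success
 * and -1 on timeout.  A non-zero 'do_sleep' yields ~1ms between polls. */
#define wait_for(COND, ms, do_sleep)					\
({									\
	unsigned long end__ = clock_ms() + (ms);			\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (clock_ms() > end__) {				\
			ret__ = -1;					\
			break;						\
		}							\
		if (do_sleep)						\
			usleep(1000);					\
	}								\
	ret__;								\
})

int main(void)
{
	unsigned long deadline = clock_ms() + 20;

	/* Wait for a condition that becomes true after ~20ms, 1s timeout. */
	if (wait_for(clock_ms() > deadline, 1000, 1))
		fprintf(stderr, "timed out\n");
	return 0;
}
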
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ec10e0..23157e1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/vgaarb.h>
 #include "drmP.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
@@ -976,14 +977,54 @@
     return true;
 }
 
-void
-intel_wait_for_vblank(struct drm_device *dev)
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe.  Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
-	/* Wait for 20ms, i.e. one cycle at 50hz. */
-	if (in_dbg_master())
-		mdelay(20); /* The kernel debugger cannot call msleep() */
-	else
-		msleep(20);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+
+	/* Wait for vblank interrupt bit to set */
+	if (wait_for((I915_READ(pipestat_reg) &
+		      PIPE_VBLANK_INTERRUPT_STATUS),
+		     50, 0))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * So this function waits for the display line value to settle (it
+ * usually ends up stopping at the start of the next frame).
+ */
+void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	u32 last_line;
+
+	/* Wait for the display line to settle */
+	do {
+		last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+		mdelay(5);
+	} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+		 time_after(timeout, jiffies));
+
+	if (time_after(jiffies, timeout))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
 /* Parameters have changed, update FBC info */
@@ -1037,7 +1078,6 @@
 void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long timeout = jiffies + msecs_to_jiffies(1);
 	u32 fbc_ctl;
 
 	if (!I915_HAS_FBC(dev))
@@ -1052,16 +1092,11 @@
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
-	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
-		if (time_after(jiffies, timeout)) {
-			DRM_DEBUG_DRIVER("FBC idle timed out\n");
-			break;
-		}
-		; /* do nothing */
+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+		DRM_DEBUG_KMS("FBC idle timed out\n");
+		return;
 	}
 
-	intel_wait_for_vblank(dev);
-
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
@@ -1118,7 +1153,6 @@
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	dpfc_ctl &= ~DPFC_CTL_EN;
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-	intel_wait_for_vblank(dev);
 
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
@@ -1179,7 +1213,6 @@
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	dpfc_ctl &= ~DPFC_CTL_EN;
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-	intel_wait_for_vblank(dev);
 
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
@@ -1478,7 +1511,7 @@
 	if ((IS_I965G(dev) || plane == 0))
 		intel_update_fbc(crtc, &crtc->mode);
 
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	intel_increase_pllclock(crtc, true);
 
 	return 0;
@@ -1585,20 +1618,18 @@
 		      Start, Offset, x, y, crtc->fb->pitch);
 	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
-		I915_WRITE(dspbase, Offset);
-		I915_READ(dspbase);
 		I915_WRITE(dspsurf, Start);
-		I915_READ(dspsurf);
 		I915_WRITE(dsptileoff, (y << 16) | x);
+		I915_WRITE(dspbase, Offset);
 	} else {
 		I915_WRITE(dspbase, Start + Offset);
-		I915_READ(dspbase);
 	}
+	POSTING_READ(dspbase);
 
 	if ((IS_I965G(dev) || plane == 0))
 		intel_update_fbc(crtc, &crtc->mode);
 
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, pipe);
 
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
@@ -1627,54 +1658,6 @@
 	return 0;
 }
 
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga (struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u8 sr1;
-	u32 vga_reg;
-
-	if (HAS_PCH_SPLIT(dev))
-		vga_reg = CPU_VGACNTRL;
-	else
-		vga_reg = VGACNTRL;
-
-	if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
-		return;
-
-	I915_WRITE8(VGA_SR_INDEX, 1);
-	sr1 = I915_READ8(VGA_SR_DATA);
-	I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
-	udelay(100);
-
-	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-}
-
-static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	DRM_DEBUG_KMS("\n");
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl &= ~DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl |= DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
-	udelay(200);
-}
-
-
 static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1945,7 +1928,6 @@
 	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
 	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
 	u32 temp;
-	int n;
 	u32 pipe_bpc;
 
 	temp = I915_READ(pipeconf_reg);
@@ -1958,7 +1940,7 @@
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
 
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 			temp = I915_READ(PCH_LVDS);
@@ -1968,10 +1950,7 @@
 			}
 		}
 
-		if (HAS_eDP) {
-			/* enable eDP PLL */
-			ironlake_enable_pll_edp(crtc);
-		} else {
+		if (!HAS_eDP) {
 
 			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 			temp = I915_READ(fdi_rx_reg);
@@ -2005,15 +1984,13 @@
 		/* Enable panel fitting for LVDS */
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
 		    || HAS_eDP || intel_pch_has_edp(crtc)) {
-			temp = I915_READ(pf_ctl_reg);
-			I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-
-			/* currently full aspect */
-			I915_WRITE(pf_win_pos, 0);
-
-			I915_WRITE(pf_win_size,
-				   (dev_priv->panel_fixed_mode->hdisplay << 16) |
-				   (dev_priv->panel_fixed_mode->vdisplay));
+			if (dev_priv->pch_pf_size) {
+				temp = I915_READ(pf_ctl_reg);
+				I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
+				I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
+				I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
+			} else
+				I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
 		}
 
 		/* Enable CPU pipe */
@@ -2097,9 +2074,10 @@
 				int reg;
 
 				reg = I915_READ(trans_dp_ctl);
-				reg &= ~TRANS_DP_PORT_SEL_MASK;
-				reg = TRANS_DP_OUTPUT_ENABLE |
-				      TRANS_DP_ENH_FRAMING;
+				reg &= ~(TRANS_DP_PORT_SEL_MASK |
+					 TRANS_DP_SYNC_MASK);
+				reg |= (TRANS_DP_OUTPUT_ENABLE |
+					TRANS_DP_ENH_FRAMING);
 
 				if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
 				      reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2137,18 +2115,17 @@
 			I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
 			I915_READ(transconf_reg);
 
-			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
-				;
-
+			if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
+				DRM_ERROR("failed to enable transcoder\n");
 		}
 
 		intel_crtc_load_lut(crtc);
 
 		intel_update_fbc(crtc, &crtc->mode);
+		break;
 
-	break;
 	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
 
 		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
@@ -2164,26 +2141,14 @@
 		    dev_priv->display.disable_fbc)
 			dev_priv->display.disable_fbc(dev);
 
-		i915_disable_vga(dev);
-
 		/* disable cpu pipe, disable after all planes disabled */
 		temp = I915_READ(pipeconf_reg);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
 			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			I915_READ(pipeconf_reg);
-			n = 0;
+
 			/* wait for cpu pipe off, pipe state */
-			while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
-				n++;
-				if (n < 60) {
-					udelay(500);
-					continue;
-				} else {
-					DRM_DEBUG_KMS("pipe %d off delay\n",
-								pipe);
-					break;
-				}
-			}
+			if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+				DRM_ERROR("failed to turn off cpu pipe\n");
 		} else
 			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 
@@ -2244,20 +2209,10 @@
 		temp = I915_READ(transconf_reg);
 		if ((temp & TRANS_ENABLE) != 0) {
 			I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
-			I915_READ(transconf_reg);
-			n = 0;
+
 			/* wait for PCH transcoder off, transcoder state */
-			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
-				n++;
-				if (n < 60) {
-					udelay(500);
-					continue;
-				} else {
-					DRM_DEBUG_KMS("transcoder %d off "
-							"delay\n", pipe);
-					break;
-				}
-			}
+			if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+				DRM_ERROR("failed to disable transcoder\n");
 		}
 
 		temp = I915_READ(transconf_reg);
@@ -2294,10 +2249,6 @@
 		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
 		I915_READ(pch_dpll_reg);
 
-		if (HAS_eDP) {
-			ironlake_disable_pll_edp(crtc);
-		}
-
 		/* Switch from PCDclk to Rawclk */
 		temp = I915_READ(fdi_rx_reg);
 		temp &= ~FDI_SEL_PCDCLK;
@@ -2372,8 +2323,6 @@
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		intel_update_watermarks(dev);
-
 		/* Enable the DPLL */
 		temp = I915_READ(dpll_reg);
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2413,8 +2362,6 @@
 		intel_crtc_dpms_overlay(intel_crtc, true);
 	break;
 	case DRM_MODE_DPMS_OFF:
-		intel_update_watermarks(dev);
-
 		/* Give the overlay scaler a chance to disable if it's on this pipe */
 		intel_crtc_dpms_overlay(intel_crtc, false);
 		drm_vblank_off(dev, pipe);
@@ -2423,9 +2370,6 @@
 		    dev_priv->display.disable_fbc)
 			dev_priv->display.disable_fbc(dev);
 
-		/* Disable the VGA plane that we never use */
-		i915_disable_vga(dev);
-
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2435,10 +2379,8 @@
 			I915_READ(dspbase_reg);
 		}
 
-		if (!IS_I9XX(dev)) {
-			/* Wait for vblank for the disable to take effect */
-			intel_wait_for_vblank(dev);
-		}
+		/* Wait for vblank for the disable to take effect */
+		intel_wait_for_vblank_off(dev, pipe);
 
 		/* Don't disable pipe A or pipe A PLLs if needed */
 		if (pipeconf_reg == PIPEACONF &&
@@ -2453,7 +2395,7 @@
 		}
 
 		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank_off(dev, pipe);
 
 		temp = I915_READ(dpll_reg);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2469,9 +2411,6 @@
 
 /**
  * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
  */
 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
@@ -2482,9 +2421,26 @@
 	int pipe = intel_crtc->pipe;
 	bool enabled;
 
+	intel_crtc->dpms_mode = mode;
+	intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+
+	/* When switching on the display, ensure that SR is disabled
+	 * with multiple pipes prior to enabling the new pipe.
+	 *
+	 * When switching off the display, make sure the cursor is
+	 * properly hidden prior to disabling the pipe.
+	 */
+	if (mode == DRM_MODE_DPMS_ON)
+		intel_update_watermarks(dev);
+	else
+		intel_crtc_update_cursor(crtc);
+
 	dev_priv->display.dpms(crtc, mode);
 
-	intel_crtc->dpms_mode = mode;
+	if (mode == DRM_MODE_DPMS_ON)
+		intel_crtc_update_cursor(crtc);
+	else
+		intel_update_watermarks(dev);
 
 	if (!dev->primary->master)
 		return;
@@ -2536,6 +2492,20 @@
 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
@@ -2867,7 +2837,7 @@
 	unsigned long cursor_hpll_disable;
 };
 
-static struct cxsr_latency cxsr_latency_table[] = {
+static const struct cxsr_latency cxsr_latency_table[] = {
 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
 	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
@@ -2905,11 +2875,13 @@
 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, 
-						   int fsb, int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+							 int is_ddr3,
+							 int fsb,
+							 int mem)
 {
+	const struct cxsr_latency *latency;
 	int i;
-	struct cxsr_latency *latency;
 
 	if (fsb == 0 || mem == 0)
 		return NULL;
@@ -2930,13 +2902,9 @@
 static void pineview_disable_cxsr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg;
 
 	/* deactivate cxsr */
-	reg = I915_READ(DSPFW3);
-	reg &= ~(PINEVIEW_SELF_REFRESH_EN);
-	I915_WRITE(DSPFW3, reg);
-	DRM_INFO("Big FIFO is disabled\n");
+	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
 }
 
 /*
@@ -3024,12 +2992,12 @@
 			  int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct cxsr_latency *latency;
 	u32 reg;
 	unsigned long wm;
-	struct cxsr_latency *latency;
 	int sr_clock;
 
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -3075,9 +3043,8 @@
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
 		/* activate cxsr */
-		reg = I915_READ(DSPFW3);
-		reg |= PINEVIEW_SELF_REFRESH_EN;
-		I915_WRITE(DSPFW3, reg);
+		I915_WRITE(DSPFW3,
+			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
 		DRM_DEBUG_KMS("Self-refresh is enabled\n");
 	} else {
 		pineview_disable_cxsr(dev);
@@ -3354,12 +3321,11 @@
 	int line_count;
 	int planea_htotal = 0, planeb_htotal = 0;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	/* Need htotal for all active display plane */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		if (crtc->enabled) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
 			if (intel_crtc->plane == 0)
 				planea_htotal = crtc->mode.htotal;
 			else
@@ -3519,7 +3485,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 	int sr_hdisplay = 0;
 	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
 	int enabled = 0, pixel_size = 0;
@@ -3530,8 +3495,8 @@
 
 	/* Get the clock config from both planes */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		if (crtc->enabled) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
 			enabled++;
 			if (intel_crtc->plane == 0) {
 				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3966,9 +3931,7 @@
 		dpll_reg = pch_dpll_reg;
 	}
 
-	if (is_edp) {
-		ironlake_disable_pll_edp(crtc);
-	} else if ((dpll & DPLL_VCO_ENABLE)) {
+	if (!is_edp) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
 		I915_READ(dpll_reg);
@@ -4167,7 +4130,7 @@
 	I915_WRITE(pipeconf_reg, pipeconf);
 	I915_READ(pipeconf_reg);
 
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, pipe);
 
 	if (IS_IRONLAKE(dev)) {
 		/* enable address swizzle for tiling buffer */
@@ -4180,9 +4143,6 @@
 	/* Flush the plane changes */
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
-	if ((IS_I965G(dev) || plane == 0))
-		intel_update_fbc(crtc, &crtc->mode);
-
 	intel_update_watermarks(dev);
 
 	drm_vblank_post_modeset(dev, pipe);
@@ -4216,6 +4176,62 @@
 	}
 }
 
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	bool visible = base != 0;
+	u32 cntl;
+
+	if (intel_crtc->cursor_visible == visible)
+		return;
+
+	cntl = I915_READ(CURACNTR);
+	if (visible) {
+		/* On these chipsets we can only modify the base whilst
+		 * the cursor is disabled.
+		 */
+		I915_WRITE(CURABASE, base);
+
+		cntl &= ~(CURSOR_FORMAT_MASK);
+		/* XXX width must be 64, stride 256 => 0x00 << 28 */
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB;
+	} else
+		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+	I915_WRITE(CURACNTR, cntl);
+
+	intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	bool visible = base != 0;
+
+	if (intel_crtc->cursor_visible != visible) {
+		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+		if (base) {
+			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+			cntl |= pipe << 28; /* Connect to correct pipe */
+		} else {
+			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+			cntl |= CURSOR_MODE_DISABLE;
+		}
+		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+		intel_crtc->cursor_visible = visible;
+	}
+	/* and commit changes on next vblank */
+	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+}
+
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
 static void intel_crtc_update_cursor(struct drm_crtc *crtc)
 {
@@ -4225,12 +4241,12 @@
 	int pipe = intel_crtc->pipe;
 	int x = intel_crtc->cursor_x;
 	int y = intel_crtc->cursor_y;
-	uint32_t base, pos;
+	u32 base, pos;
 	bool visible;
 
 	pos = 0;
 
-	if (crtc->fb) {
+	if (intel_crtc->cursor_on && crtc->fb) {
 		base = intel_crtc->cursor_addr;
 		if (x > (int) crtc->fb->width)
 			base = 0;
@@ -4259,37 +4275,14 @@
 	pos |= y << CURSOR_Y_SHIFT;
 
 	visible = base != 0;
-	if (!visible && !intel_crtc->cursor_visble)
+	if (!visible && !intel_crtc->cursor_visible)
 		return;
 
 	I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
-	if (intel_crtc->cursor_visble != visible) {
-		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
-		if (base) {
-			/* Hooray for CUR*CNTR differences */
-			if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-				cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
-				cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-				cntl |= pipe << 28; /* Connect to correct pipe */
-			} else {
-				cntl &= ~(CURSOR_FORMAT_MASK);
-				cntl |= CURSOR_ENABLE;
-				cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
-			}
-		} else {
-			if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-				cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
-				cntl |= CURSOR_MODE_DISABLE;
-			} else {
-				cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-			}
-		}
-		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
-
-		intel_crtc->cursor_visble = visible;
-	}
-	/* and commit changes on next vblank */
-	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+	if (IS_845G(dev) || IS_I865G(dev))
+		i845_update_cursor(crtc, base);
+	else
+		i9xx_update_cursor(crtc, base);
 
 	if (visible)
 		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -4354,8 +4347,10 @@
 
 		addr = obj_priv->gtt_offset;
 	} else {
+		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_attach_phys_object(dev, bo,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+						  align);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
 			goto fail_locked;
@@ -4544,7 +4539,7 @@
 		encoder_funcs->commit(encoder);
 	}
 	/* let the connector get through one full cycle before testing */
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	return crtc;
 }
@@ -4749,7 +4744,7 @@
 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
 		dpll = I915_READ(dpll_reg);
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, pipe);
 		dpll = I915_READ(dpll_reg);
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4793,7 +4788,7 @@
 		dpll |= DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
 		dpll = I915_READ(dpll_reg);
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, pipe);
 		dpll = I915_READ(dpll_reg);
 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
@@ -5083,14 +5078,16 @@
 	work->pending_flip_obj = obj;
 
 	if (intel_crtc->plane)
-		flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
-		flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
-	/* Wait for any previous flip to finish */
-	if (IS_GEN3(dev))
-		while (I915_READ(ISR) & flip_mask)
-			;
+	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+		BEGIN_LP_RING(2);
+		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = obj_priv->gtt_offset;
@@ -5104,12 +5101,18 @@
 		OUT_RING(offset | obj_priv->tiling_mode);
 		pipesrc = I915_READ(pipesrc_reg); 
 		OUT_RING(pipesrc & 0x0fff0fff);
-	} else {
+	} else if (IS_GEN3(dev)) {
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
 		OUT_RING(offset);
 		OUT_RING(MI_NOOP);
+	} else {
+		OUT_RING(MI_DISPLAY_FLIP |
+			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+		OUT_RING(fb->pitch);
+		OUT_RING(offset);
+		OUT_RING(MI_NOOP);
 	}
 	ADVANCE_LP_RING();
 
@@ -5432,37 +5435,37 @@
 };
 
 static struct drm_gem_object *
-intel_alloc_power_context(struct drm_device *dev)
+intel_alloc_context_page(struct drm_device *dev)
 {
-	struct drm_gem_object *pwrctx;
+	struct drm_gem_object *ctx;
 	int ret;
 
-	pwrctx = i915_gem_alloc_object(dev, 4096);
-	if (!pwrctx) {
+	ctx = i915_gem_alloc_object(dev, 4096);
+	if (!ctx) {
 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
 		return NULL;
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(pwrctx, 4096);
+	ret = i915_gem_object_pin(ctx, 4096);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
 	if (ret) {
 		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
 		goto err_unpin;
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	return pwrctx;
+	return ctx;
 
 err_unpin:
-	i915_gem_object_unpin(pwrctx);
+	i915_gem_object_unpin(ctx);
 err_unref:
-	drm_gem_object_unreference(pwrctx);
+	drm_gem_object_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 	return NULL;
 }
@@ -5494,7 +5497,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
-	int i = 0;
 
 	/* 100ms RC evaluation intervals */
 	I915_WRITE(RCUPEI, 100000);
@@ -5538,13 +5540,8 @@
 	rgvmodectl |= MEMMODE_SWMODE_EN;
 	I915_WRITE(MEMMODECTL, rgvmodectl);
 
-	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
-		if (i++ > 100) {
-			DRM_ERROR("stuck trying to change perf mode\n");
-			break;
-		}
-		msleep(1);
-	}
+	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+		DRM_ERROR("stuck trying to change perf mode\n");
 	msleep(1);
 
 	ironlake_set_drps(dev, fstart);
@@ -5725,7 +5722,8 @@
 				   ILK_DPFC_DIS2 |
 				   ILK_CLK_FBC);
 		}
-		return;
+		if (IS_GEN6(dev))
+			return;
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
 		I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5768,6 +5766,31 @@
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
+	if (IS_IRONLAKE_M(dev)) {
+		if (dev_priv->renderctx == NULL)
+			dev_priv->renderctx = intel_alloc_context_page(dev);
+		if (dev_priv->renderctx) {
+			struct drm_i915_gem_object *obj_priv;
+			obj_priv = to_intel_bo(dev_priv->renderctx);
+			if (obj_priv) {
+				BEGIN_LP_RING(4);
+				OUT_RING(MI_SET_CONTEXT);
+				OUT_RING(obj_priv->gtt_offset |
+						MI_MM_SPACE_GTT |
+						MI_SAVE_EXT_STATE_EN |
+						MI_RESTORE_EXT_STATE_EN |
+						MI_RESTORE_INHIBIT);
+				OUT_RING(MI_NOOP);
+				OUT_RING(MI_FLUSH);
+				ADVANCE_LP_RING();
+			}
+		} else {
+			DRM_DEBUG_KMS("Failed to allocate render context. "
+				      "Disabling RC6\n");
+			return;
+		}
+	}
+
 	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_i915_gem_object *obj_priv = NULL;
 
@@ -5776,7 +5799,7 @@
 		} else {
 			struct drm_gem_object *pwrctx;
 
-			pwrctx = intel_alloc_power_context(dev);
+			pwrctx = intel_alloc_context_page(dev);
 			if (pwrctx) {
 				dev_priv->pwrctx = pwrctx;
 				obj_priv = to_intel_bo(pwrctx);
@@ -5948,6 +5971,29 @@
 	}
 }
 
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 sr1;
+	u32 vga_reg;
+
+	if (HAS_PCH_SPLIT(dev))
+		vga_reg = CPU_VGACNTRL;
+	else
+		vga_reg = VGACNTRL;
+
+	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	outb(1, VGA_SR_INDEX);
+	sr1 = inb(VGA_SR_DATA);
+	outb(sr1 | 1<<5, VGA_SR_DATA);
+	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	udelay(300);
+
+	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+	POSTING_READ(vga_reg);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5996,6 +6042,9 @@
 
 	intel_init_clock_gating(dev);
 
+	/* Just disable it once at startup */
+	i915_disable_vga(dev);
+
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
 		intel_init_emon(dev);
@@ -6034,6 +6083,16 @@
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
+	if (dev_priv->renderctx) {
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = to_intel_bo(dev_priv->renderctx);
+		I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
+		I915_READ(CCID);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(dev_priv->renderctx);
+	}
+
 	if (dev_priv->pwrctx) {
 		struct drm_i915_gem_object *obj_priv;
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 40be1fa..9caccd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,10 +42,11 @@
 
 #define DP_LINK_CONFIGURATION_SIZE	9
 
-#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
+#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(i) ((i)->is_pch_edp)
 
-struct intel_dp_priv {
+struct intel_dp {
+	struct intel_encoder base;
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -54,40 +55,39 @@
 	uint8_t link_bw;
 	uint8_t lane_count;
 	uint8_t dpcd[4];
-	struct intel_encoder *intel_encoder;
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
 	bool is_pch_edp;
 };
 
-static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
-		    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+}
 
-static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
+static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 void
 intel_edp_link_config (struct intel_encoder *intel_encoder,
-		int *lane_num, int *link_bw)
+		       int *lane_num, int *link_bw)
 {
-	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
+	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-	*lane_num = dp_priv->lane_count;
-	if (dp_priv->link_bw == DP_LINK_BW_1_62)
+	*lane_num = intel_dp->lane_count;
+	if (intel_dp->link_bw == DP_LINK_BW_1_62)
 		*link_bw = 162000;
-	else if (dp_priv->link_bw == DP_LINK_BW_2_7)
+	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
 		*link_bw = 270000;
 }
 
 static int
-intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
+intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
-	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
 	int max_lane_count = 4;
 
-	if (dp_priv->dpcd[0] >= 0x11) {
-		max_lane_count = dp_priv->dpcd[2] & 0x1f;
+	if (intel_dp->dpcd[0] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[2] & 0x1f;
 		switch (max_lane_count) {
 		case 1: case 2: case 4:
 			break;
@@ -99,10 +99,9 @@
 }
 
 static int
-intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
-	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
-	int max_link_bw = dp_priv->dpcd[1];
+	int max_link_bw = intel_dp->dpcd[1];
 
 	switch (max_link_bw) {
 	case DP_LINK_BW_1_62:
@@ -126,13 +125,11 @@
 
 /* I think this is a fiction */
 static int
-intel_dp_link_required(struct drm_device *dev,
-		       struct intel_encoder *intel_encoder, int pixel_clock)
+intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
-	if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
+	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
 		return (pixel_clock * dev_priv->edp_bpp) / 8;
 	else
 		return pixel_clock * 3;
@@ -149,14 +146,13 @@
 		    struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
-	int max_lanes = intel_dp_max_lane_count(intel_encoder);
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+	int max_lanes = intel_dp_max_lane_count(intel_dp);
 
-	if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
 	    dev_priv->panel_fixed_mode) {
 		if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
 			return MODE_PANEL;
@@ -167,8 +163,8 @@
 
 	/* only refuse the mode on non eDP since we have seen some weird eDP panels
 	   which are outside spec tolerances but somehow work by magic */
-	if (!IS_eDP(intel_encoder) &&
-	    (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
+	if (!IS_eDP(intel_dp) &&
+	    (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
 	     > intel_dp_max_data_rate(max_link_clock, max_lanes)))
 		return MODE_CLOCK_HIGH;
 
@@ -232,13 +228,12 @@
 }
 
 static int
-intel_dp_aux_ch(struct intel_encoder *intel_encoder,
+intel_dp_aux_ch(struct intel_dp *intel_dp,
 		uint8_t *send, int send_bytes,
 		uint8_t *recv, int recv_size)
 {
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-	uint32_t output_reg = dp_priv->output_reg;
-	struct drm_device *dev = intel_encoder->enc.dev;
+	uint32_t output_reg = intel_dp->output_reg;
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -253,7 +248,7 @@
 	 * and would like to run at 2MHz. So, take the
 	 * hrawclk value and divide by 2 and use that
 	 */
-	if (IS_eDP(intel_encoder)) {
+	if (IS_eDP(intel_dp)) {
 		if (IS_GEN6(dev))
 			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
 		else
@@ -344,7 +339,7 @@
 
 /* Write data to the aux channel in native mode */
 static int
-intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
 			  uint16_t address, uint8_t *send, int send_bytes)
 {
 	int ret;
@@ -361,7 +356,7 @@
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
+		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
 		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -376,15 +371,15 @@
 
 /* Write a single byte to the aux channel in native mode */
 static int
-intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
 			    uint16_t address, uint8_t byte)
 {
-	return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
+	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
 }
 
 /* read bytes from a native aux channel */
 static int
-intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
 			 uint16_t address, uint8_t *recv, int recv_bytes)
 {
 	uint8_t msg[4];
@@ -403,7 +398,7 @@
 	reply_bytes = recv_bytes + 1;
 
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
+		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret == 0)
 			return -EPROTO;
@@ -426,10 +421,9 @@
 		    uint8_t write_byte, uint8_t *read_byte)
 {
 	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-	struct intel_dp_priv *dp_priv = container_of(adapter,
-						     struct intel_dp_priv,
-						     adapter);
-	struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
+	struct intel_dp *intel_dp = container_of(adapter,
+						struct intel_dp,
+						adapter);
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
@@ -468,7 +462,7 @@
 	}
 
 	for (;;) {
-	  ret = intel_dp_aux_ch(intel_encoder,
+	  ret = intel_dp_aux_ch(intel_dp,
 				msg, msg_bytes,
 				reply, reply_bytes);
 		if (ret < 0) {
@@ -496,57 +490,42 @@
 }
 
 static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+intel_dp_i2c_init(struct intel_dp *intel_dp,
 		  struct intel_connector *intel_connector, const char *name)
 {
-	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
-
 	DRM_DEBUG_KMS("i2c_init %s\n", name);
-	dp_priv->algo.running = false;
-	dp_priv->algo.address = 0;
-	dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
 
-	memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
-	dp_priv->adapter.owner = THIS_MODULE;
-	dp_priv->adapter.class = I2C_CLASS_DDC;
-	strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
-	dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
-	dp_priv->adapter.algo_data = &dp_priv->algo;
-	dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
-	
-	return i2c_dp_aux_add_bus(&dp_priv->adapter);
+	memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+
+	return i2c_dp_aux_add_bus(&intel_dp->adapter);
 }
 
 static bool
 intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	int lane_count, clock;
-	int max_lane_count = intel_dp_max_lane_count(intel_encoder);
-	int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+	int max_lane_count = intel_dp_max_lane_count(intel_dp);
+	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
-	if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
 	    dev_priv->panel_fixed_mode) {
-		struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
-
-		adjusted_mode->hdisplay = fixed_mode->hdisplay;
-		adjusted_mode->hsync_start = fixed_mode->hsync_start;
-		adjusted_mode->hsync_end = fixed_mode->hsync_end;
-		adjusted_mode->htotal = fixed_mode->htotal;
-
-		adjusted_mode->vdisplay = fixed_mode->vdisplay;
-		adjusted_mode->vsync_start = fixed_mode->vsync_start;
-		adjusted_mode->vsync_end = fixed_mode->vsync_end;
-		adjusted_mode->vtotal = fixed_mode->vtotal;
-
-		adjusted_mode->clock = fixed_mode->clock;
-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-
+		intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+					mode, adjusted_mode);
 		/*
 		 * the mode->clock is used to calculate the Data&Link M/N
 		 * of the pipe. For the eDP the fixed clock should be used.
@@ -558,31 +537,33 @@
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-			if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
+			if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
 					<= link_avail) {
-				dp_priv->link_bw = bws[clock];
-				dp_priv->lane_count = lane_count;
-				adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+				intel_dp->link_bw = bws[clock];
+				intel_dp->lane_count = lane_count;
+				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
 				DRM_DEBUG_KMS("Display port link bw %02x lane "
 						"count %d clock %d\n",
-				       dp_priv->link_bw, dp_priv->lane_count,
+				       intel_dp->link_bw, intel_dp->lane_count,
 				       adjusted_mode->clock);
 				return true;
 			}
 		}
 	}
 
-	if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
 		/* okay we failed just pick the highest */
-		dp_priv->lane_count = max_lane_count;
-		dp_priv->link_bw = bws[max_clock];
-		adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
 		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
 			      "count %d clock %d\n",
-			      dp_priv->link_bw, dp_priv->lane_count,
+			      intel_dp->link_bw, intel_dp->lane_count,
 			      adjusted_mode->clock);
+
 		return true;
 	}
+
 	return false;
 }
 
@@ -626,17 +607,14 @@
 	struct drm_encoder *encoder;
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct intel_encoder *intel_encoder;
-		struct intel_dp_priv *dp_priv;
+		struct intel_dp *intel_dp;
 
-		if (!encoder || encoder->crtc != crtc)
+		if (encoder->crtc != crtc)
 			continue;
 
-		intel_encoder = enc_to_intel_encoder(encoder);
-		dp_priv = intel_encoder->dev_priv;
-
-		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
-			return dp_priv->is_pch_edp;
+		intel_dp = enc_to_intel_dp(encoder);
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+			return intel_dp->is_pch_edp;
 	}
 	return false;
 }
@@ -657,18 +635,15 @@
 	 * Find the lane count in the intel_encoder private
 	 */
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct intel_encoder *intel_encoder;
-		struct intel_dp_priv *dp_priv;
+		struct intel_dp *intel_dp;
 
 		if (encoder->crtc != crtc)
 			continue;
 
-		intel_encoder = enc_to_intel_encoder(encoder);
-		dp_priv = intel_encoder->dev_priv;
-
-		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-			lane_count = dp_priv->lane_count;
-			if (IS_PCH_eDP(dp_priv))
+		intel_dp = enc_to_intel_dp(encoder);
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+			lane_count = intel_dp->lane_count;
+			if (IS_PCH_eDP(intel_dp))
 				bpp = dev_priv->edp_bpp;
 			break;
 		}
@@ -724,107 +699,114 @@
 		  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-	struct drm_crtc *crtc = intel_encoder->enc.crtc;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_crtc *crtc = intel_dp->base.enc.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	dp_priv->DP = (DP_VOLTAGE_0_4 |
+	intel_dp->DP = (DP_VOLTAGE_0_4 |
 		       DP_PRE_EMPHASIS_0);
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-		dp_priv->DP |= DP_SYNC_HS_HIGH;
+		intel_dp->DP |= DP_SYNC_HS_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-		dp_priv->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_SYNC_VS_HIGH;
 
-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
-		dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	else
-		dp_priv->DP |= DP_LINK_TRAIN_OFF;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
-	switch (dp_priv->lane_count) {
+	switch (intel_dp->lane_count) {
 	case 1:
-		dp_priv->DP |= DP_PORT_WIDTH_1;
+		intel_dp->DP |= DP_PORT_WIDTH_1;
 		break;
 	case 2:
-		dp_priv->DP |= DP_PORT_WIDTH_2;
+		intel_dp->DP |= DP_PORT_WIDTH_2;
 		break;
 	case 4:
-		dp_priv->DP |= DP_PORT_WIDTH_4;
+		intel_dp->DP |= DP_PORT_WIDTH_4;
 		break;
 	}
-	if (dp_priv->has_audio)
-		dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+	if (intel_dp->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
-	memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	dp_priv->link_configuration[0] = dp_priv->link_bw;
-	dp_priv->link_configuration[1] = dp_priv->lane_count;
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
 
 	/*
 	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
-	if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
-		dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-		dp_priv->DP |= DP_ENHANCED_FRAMING;
+	if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+		intel_dp->DP |= DP_ENHANCED_FRAMING;
 	}
 
 	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
 	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
-		dp_priv->DP |= DP_PIPEB_SELECT;
+		intel_dp->DP |= DP_PIPEB_SELECT;
 
-	if (IS_eDP(intel_encoder)) {
+	if (IS_eDP(intel_dp)) {
 		/* don't miss out required setting for eDP */
-		dp_priv->DP |= DP_PLL_ENABLE;
+		intel_dp->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
-			dp_priv->DP |= DP_PLL_FREQ_160MHZ;
+			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
-			dp_priv->DP |= DP_PLL_FREQ_270MHZ;
+			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 	}
 }
 
 static void ironlake_edp_panel_on (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
-	u32 pp, pp_status;
+	u32 pp;
 
-	pp_status = I915_READ(PCH_PP_STATUS);
-	if (pp_status & PP_ON)
+	if (I915_READ(PCH_PP_STATUS) & PP_ON)
 		return;
 
 	pp = I915_READ(PCH_PP_CONTROL);
+
+	/* ILK workaround: disable reset around power sequence */
+	pp &= ~PANEL_POWER_RESET;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
+
 	pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
 	I915_WRITE(PCH_PP_CONTROL, pp);
-	do {
-		pp_status = I915_READ(PCH_PP_STATUS);
-	} while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
 
-	if (time_after(jiffies, timeout))
-		DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+	if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+		DRM_ERROR("panel on wait timed out: 0x%08x\n",
+			  I915_READ(PCH_PP_STATUS));
 
 	pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
 }
 
 static void ironlake_edp_panel_off (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
-	u32 pp, pp_status;
+	u32 pp;
 
 	pp = I915_READ(PCH_PP_CONTROL);
+
+	/* ILK workaround: disable reset around power sequence */
+	pp &= ~PANEL_POWER_RESET;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
+
 	pp &= ~POWER_TARGET_ON;
 	I915_WRITE(PCH_PP_CONTROL, pp);
-	do {
-		pp_status = I915_READ(PCH_PP_STATUS);
-	} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
 
-	if (time_after(jiffies, timeout))
-		DRM_DEBUG_KMS("panel off wait timed out\n");
+	if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+		DRM_ERROR("panel off wait timed out: 0x%08x\n",
+			  I915_READ(PCH_PP_STATUS));
 
 	/* Make sure VDD is enabled so DP AUX will work */
-	pp |= EDP_FORCE_VDD;
+	pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
 	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
 }
 
 static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -849,33 +831,87 @@
 	I915_WRITE(PCH_PP_CONTROL, pp);
 }
 
+static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	DRM_DEBUG_KMS("\n");
+	dpa_ctl = I915_READ(DP_A);
+	dpa_ctl &= ~DP_PLL_ENABLE;
+	I915_WRITE(DP_A, dpa_ctl);
+}
+
+static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	dpa_ctl = I915_READ(DP_A);
+	dpa_ctl |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, dpa_ctl);
+	udelay(200);
+}
+
+static void intel_dp_prepare(struct drm_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (IS_eDP(intel_dp)) {
+		ironlake_edp_backlight_off(dev);
+		ironlake_edp_panel_on(dev);
+		ironlake_edp_pll_on(encoder);
+	}
+	if (dp_reg & DP_PORT_EN)
+		intel_dp_link_down(intel_dp);
+}
+
+static void intel_dp_commit(struct drm_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (!(dp_reg & DP_PORT_EN)) {
+		intel_dp_link_train(intel_dp);
+	}
+	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+		ironlake_edp_backlight_on(dev);
+}
+
 static void
 intel_dp_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
-		if (dp_reg & DP_PORT_EN) {
-			intel_dp_link_down(intel_encoder, dp_priv->DP);
-			if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
-				ironlake_edp_backlight_off(dev);
-				ironlake_edp_panel_off(dev);
-			}
+		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+			ironlake_edp_backlight_off(dev);
+			ironlake_edp_panel_off(dev);
 		}
+		if (dp_reg & DP_PORT_EN)
+			intel_dp_link_down(intel_dp);
+		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+			ironlake_edp_pll_off(encoder);
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
-			intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
-			if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
 				ironlake_edp_panel_on(dev);
+			intel_dp_link_train(intel_dp);
+			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
 				ironlake_edp_backlight_on(dev);
-			}
 		}
 	}
-	dp_priv->dpms_mode = mode;
+	intel_dp->dpms_mode = mode;
 }
 
 /*
@@ -883,12 +919,12 @@
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_encoder *intel_encoder,
+intel_dp_get_link_status(struct intel_dp *intel_dp,
 			 uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	int ret;
 
-	ret = intel_dp_aux_native_read(intel_encoder,
+	ret = intel_dp_aux_native_read(intel_dp,
 				       DP_LANE0_1_STATUS,
 				       link_status, DP_LINK_STATUS_SIZE);
 	if (ret != DP_LINK_STATUS_SIZE)
@@ -965,7 +1001,7 @@
 }
 
 static void
-intel_get_adjust_train(struct intel_encoder *intel_encoder,
+intel_get_adjust_train(struct intel_dp *intel_dp,
 		       uint8_t link_status[DP_LINK_STATUS_SIZE],
 		       int lane_count,
 		       uint8_t train_set[4])
@@ -1101,27 +1137,27 @@
 }
 
 static bool
-intel_dp_set_link_train(struct intel_encoder *intel_encoder,
+intel_dp_set_link_train(struct intel_dp *intel_dp,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat,
 			uint8_t train_set[4],
 			bool first)
 {
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
 	int ret;
 
-	I915_WRITE(dp_priv->output_reg, dp_reg_value);
-	POSTING_READ(dp_priv->output_reg);
+	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+	POSTING_READ(intel_dp->output_reg);
 	if (first)
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	intel_dp_aux_native_write_1(intel_encoder,
+	intel_dp_aux_native_write_1(intel_dp,
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_encoder,
+	ret = intel_dp_aux_native_write(intel_dp,
 					DP_TRAINING_LANE0_SET, train_set, 4);
 	if (ret != 4)
 		return false;
@@ -1130,12 +1166,10 @@
 }
 
 static void
-intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
-		    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+intel_dp_link_train(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t	train_set[4];
 	uint8_t link_status[DP_LINK_STATUS_SIZE];
 	int i;
@@ -1145,13 +1179,15 @@
 	bool first = true;
 	int tries;
 	u32 reg;
+	uint32_t DP = intel_dp->DP;
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
-				  link_configuration, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 	else
 		DP &= ~DP_LINK_TRAIN_MASK;
@@ -1162,39 +1198,39 @@
 	for (;;) {
 		/* Use train_set[0] to set the voltage and pre emphasis values */
 		uint32_t    signal_levels;
-		if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_1;
 
-		if (!intel_dp_set_link_train(intel_encoder, reg,
+		if (!intel_dp_set_link_train(intel_dp, reg,
 					     DP_TRAINING_PATTERN_1, train_set, first))
 			break;
 		first = false;
 		/* Set training pattern 1 */
 
 		udelay(100);
-		if (!intel_dp_get_link_status(intel_encoder, link_status))
+		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
-		if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			clock_recovery = true;
 			break;
 		}
 
 		/* Check to see if we've tried the max voltage */
-		for (i = 0; i < dp_priv->lane_count; i++)
+		for (i = 0; i < intel_dp->lane_count; i++)
 			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
 				break;
-		if (i == dp_priv->lane_count)
+		if (i == intel_dp->lane_count)
 			break;
 
 		/* Check to see if we've tried the same voltage 5 times */
@@ -1207,7 +1243,7 @@
 		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
 	}
 
 	/* channel equalization */
@@ -1217,30 +1253,30 @@
 		/* Use train_set[0] to set the voltage and pre emphasis values */
 		uint32_t    signal_levels;
 
-		if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_2;
 
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_encoder, reg,
+		if (!intel_dp_set_link_train(intel_dp, reg,
 					     DP_TRAINING_PATTERN_2, train_set,
 					     false))
 			break;
 
 		udelay(400);
-		if (!intel_dp_get_link_status(intel_encoder, link_status))
+		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
-		if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+		if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
 			channel_eq = true;
 			break;
 		}
@@ -1250,53 +1286,53 @@
 			break;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
 	else
 		reg = DP | DP_LINK_TRAIN_OFF;
 
-	I915_WRITE(dp_priv->output_reg, reg);
-	POSTING_READ(dp_priv->output_reg);
-	intel_dp_aux_native_write_1(intel_encoder,
+	I915_WRITE(intel_dp->output_reg, reg);
+	POSTING_READ(intel_dp->output_reg);
+	intel_dp_aux_native_write_1(intel_dp,
 				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
-intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
+intel_dp_link_down(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	uint32_t DP = intel_dp->DP;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (IS_eDP(intel_encoder)) {
+	if (IS_eDP(intel_dp)) {
 		DP &= ~DP_PLL_ENABLE;
-		I915_WRITE(dp_priv->output_reg, DP);
-		POSTING_READ(dp_priv->output_reg);
+		I915_WRITE(intel_dp->output_reg, DP);
+		POSTING_READ(intel_dp->output_reg);
 		udelay(100);
 	}
 
-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
-		I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
-		POSTING_READ(dp_priv->output_reg);
+		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+		POSTING_READ(intel_dp->output_reg);
 	} else {
 		DP &= ~DP_LINK_TRAIN_MASK;
-		I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
-		POSTING_READ(dp_priv->output_reg);
+		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+		POSTING_READ(intel_dp->output_reg);
 	}
 
 	udelay(17000);
 
-	if (IS_eDP(intel_encoder))
+	if (IS_eDP(intel_dp))
 		DP |= DP_LINK_TRAIN_OFF;
-	I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
-	POSTING_READ(dp_priv->output_reg);
+	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	POSTING_READ(intel_dp->output_reg);
 }
 
 /*
@@ -1309,41 +1345,39 @@
  */
 
 static void
-intel_dp_check_link_status(struct intel_encoder *intel_encoder)
+intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t link_status[DP_LINK_STATUS_SIZE];
 
-	if (!intel_encoder->enc.crtc)
+	if (!intel_dp->base.enc.crtc)
 		return;
 
-	if (!intel_dp_get_link_status(intel_encoder, link_status)) {
-		intel_dp_link_down(intel_encoder, dp_priv->DP);
+	if (!intel_dp_get_link_status(intel_dp, link_status)) {
+		intel_dp_link_down(intel_dp);
 		return;
 	}
 
-	if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
-		intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+	if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
+		intel_dp_link_train(intel_dp);
 }
 
 static enum drm_connector_status
 ironlake_dp_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum drm_connector_status status;
 
 	status = connector_status_disconnected;
-	if (intel_dp_aux_native_read(intel_encoder,
-				     0x000, dp_priv->dpcd,
-				     sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+	if (intel_dp_aux_native_read(intel_dp,
+				     0x000, intel_dp->dpcd,
+				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
 	{
-		if (dp_priv->dpcd[0] != 0)
+		if (intel_dp->dpcd[0] != 0)
 			status = connector_status_connected;
 	}
-	DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
-		      dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
+	DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
+		      intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
 	return status;
 }
 
@@ -1357,19 +1391,18 @@
 intel_dp_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t temp, bit;
 	enum drm_connector_status status;
 
-	dp_priv->has_audio = false;
+	intel_dp->has_audio = false;
 
 	if (HAS_PCH_SPLIT(dev))
 		return ironlake_dp_detect(connector);
 
-	switch (dp_priv->output_reg) {
+	switch (intel_dp->output_reg) {
 	case DP_B:
 		bit = DPB_HOTPLUG_INT_STATUS;
 		break;
@@ -1389,11 +1422,11 @@
 		return connector_status_disconnected;
 
 	status = connector_status_disconnected;
-	if (intel_dp_aux_native_read(intel_encoder,
-				     0x000, dp_priv->dpcd,
-				     sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+	if (intel_dp_aux_native_read(intel_dp,
+				     0x000, intel_dp->dpcd,
+				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
 	{
-		if (dp_priv->dpcd[0] != 0)
+		if (intel_dp->dpcd[0] != 0)
 			status = connector_status_connected;
 	}
 	return status;
@@ -1402,18 +1435,17 @@
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_device *dev = intel_dp->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int ret;
 
 	/* We should parse the EDID data and find out if it has an audio sink
 	 */
 
-	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
 	if (ret) {
-		if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+		if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
 		    !dev_priv->panel_fixed_mode) {
 			struct drm_display_mode *newmode;
 			list_for_each_entry(newmode, &connector->probed_modes,
@@ -1430,7 +1462,7 @@
 	}
 
 	/* if eDP has no EDID, try to use fixed panel mode from VBT */
-	if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
 		if (dev_priv->panel_fixed_mode != NULL) {
 			struct drm_display_mode *mode;
 			mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1452,9 +1484,9 @@
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
 	.dpms = intel_dp_dpms,
 	.mode_fixup = intel_dp_mode_fixup,
-	.prepare = intel_encoder_prepare,
+	.prepare = intel_dp_prepare,
 	.mode_set = intel_dp_mode_set,
-	.commit = intel_encoder_commit,
+	.commit = intel_dp_commit,
 };
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1470,27 +1502,17 @@
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_dp_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
-	.destroy = intel_dp_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-	if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
-		intel_dp_check_link_status(intel_encoder);
+	if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
+		intel_dp_check_link_status(intel_dp);
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -1500,18 +1522,18 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_encoder *encoder;
-	struct intel_encoder *intel_encoder = NULL;
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct intel_dp *intel_dp;
+
 		if (encoder->crtc != crtc)
 			continue;
 
-		intel_encoder = enc_to_intel_encoder(encoder);
-		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-			struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-			return dp_priv->output_reg;
-		}
+		intel_dp = enc_to_intel_dp(encoder);
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+			return intel_dp->output_reg;
 	}
+
 	return -1;
 }
 
@@ -1540,30 +1562,28 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
+	struct intel_dp *intel_dp;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
-	struct intel_dp_priv *dp_priv;
 	const char *name = NULL;
 	int type;
 
-	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
-			       sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
-	if (!intel_encoder)
+	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
+	if (!intel_dp)
 		return;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_dp);
 		return;
 	}
+	intel_encoder = &intel_dp->base;
 
-	dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
-
-	if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
+	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
 		if (intel_dpd_is_edp(dev))
-			dp_priv->is_pch_edp = true;
+			intel_dp->is_pch_edp = true;
 
-	if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
 		type = DRM_MODE_CONNECTOR_eDP;
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 	} else {
@@ -1584,18 +1604,16 @@
 	else if (output_reg == DP_D || output_reg == PCH_DP_D)
 		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
-	if (IS_eDP(intel_encoder))
+	if (IS_eDP(intel_dp))
 		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	dp_priv->intel_encoder = intel_encoder;
-	dp_priv->output_reg = output_reg;
-	dp_priv->has_audio = false;
-	dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
-	intel_encoder->dev_priv = dp_priv;
+	intel_dp->output_reg = output_reg;
+	intel_dp->has_audio = false;
+	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
 
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
@@ -1630,12 +1648,12 @@
 			break;
 	}
 
-	intel_dp_i2c_init(intel_encoder, intel_connector, name);
+	intel_dp_i2c_init(intel_dp, intel_connector, name);
 
-	intel_encoder->ddc_bus = &dp_priv->adapter;
+	intel_encoder->ddc_bus = &intel_dp->adapter;
 	intel_encoder->hot_plug = intel_dp_hot_plug;
 
-	if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
 		/* initialize panel mode from VBT if available for eDP */
 		if (dev_priv->lfp_lvds_vbt_mode) {
 			dev_priv->panel_fixed_mode =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b219014..0e92aa0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,20 @@
 #include "drm_crtc.h"
 
 #include "drm_crtc_helper.h"
+
+#define wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (! (COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		if (W) msleep(W);					\
+	}								\
+	ret__;								\
+})
+
 /*
  * Display related stuff
  */
@@ -102,7 +116,6 @@
 	struct i2c_adapter *ddc_bus;
 	bool load_detect_temp;
 	bool needs_tv_clock;
-	void *dev_priv;
 	void (*hot_plug)(struct intel_encoder *);
 	int crtc_mask;
 	int clone_mask;
@@ -110,7 +123,6 @@
 
 struct intel_connector {
 	struct drm_connector base;
-	void *dev_priv;
 };
 
 struct intel_crtc;
@@ -156,7 +168,7 @@
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
-	bool cursor_visble;
+	bool cursor_visible, cursor_on;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -188,10 +200,18 @@
 extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
 
 
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+				   struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+				    int fitting_mode,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode);
+
 extern int intel_panel_fitter_pipe (struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
 
 extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
 
@@ -199,7 +219,8 @@
 						    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern void intel_wait_for_vblank(struct drm_device *dev);
+extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 						   struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 227feca..a399f4b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
 #define CH7xxx_ADDR	0x76
 #define TFP410_ADDR	0x38
 
-static struct intel_dvo_device intel_dvo_devices[] = {
+static const struct intel_dvo_device intel_dvo_devices[] = {
 	{
 		.type = INTEL_DVO_CHIP_TMDS,
 		.name = "sil164",
@@ -77,20 +77,33 @@
 	}
 };
 
+struct intel_dvo {
+	struct intel_encoder base;
+
+	struct intel_dvo_device dev;
+
+	struct drm_display_mode *panel_fixed_mode;
+	bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+}
+
 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-	u32 dvo_reg = dvo->dvo_reg;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
 	if (mode == DRM_MODE_DPMS_ON) {
 		I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 		I915_READ(dvo_reg);
-		dvo->dev_ops->dpms(dvo, mode);
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
 	} else {
-		dvo->dev_ops->dpms(dvo, mode);
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
 		I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
 		I915_READ(dvo_reg);
 	}
@@ -100,38 +113,36 @@
 				struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
 	/* XXX: Validate clock range */
 
-	if (dvo->panel_fixed_mode) {
-		if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+	if (intel_dvo->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
 			return MODE_PANEL;
-		if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+		if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
-	return dvo->dev_ops->mode_valid(dvo, mode);
+	return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
 }
 
 static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
 				 struct drm_display_mode *mode,
 				 struct drm_display_mode *adjusted_mode)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 
 	/* If we have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode.  The CRTC will be set up for this mode,
 	 * with the panel scaling set up to source from the H/VDisplay
 	 * of the original mode.
 	 */
-	if (dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+	if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
 		C(hdisplay);
 		C(hsync_start);
 		C(hsync_end);
@@ -145,8 +156,8 @@
 #undef C
 	}
 
-	if (dvo->dev_ops->mode_fixup)
-		return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+	if (intel_dvo->dev.dev_ops->mode_fixup)
+		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
 
 	return true;
 }
@@ -158,11 +169,10 @@
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 	int pipe = intel_crtc->pipe;
 	u32 dvo_val;
-	u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
+	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
 	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
 
 	switch (dvo_reg) {
@@ -178,7 +188,7 @@
 		break;
 	}
 
-	dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
 
 	/* Save the data order, since I don't know what it should be set to. */
 	dvo_val = I915_READ(dvo_reg) &
@@ -214,40 +224,38 @@
 static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 
-	return dvo->dev_ops->detect(dvo);
+	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 
 	/* We should probably have an i2c driver get_modes function for those
 	 * devices which will have a fixed set of modes determined by the chip
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * that's not the case.
 	 */
-	intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
 	if (!list_empty(&connector->probed_modes))
 		return 1;
 
-
-	if (dvo->panel_fixed_mode != NULL) {
+	if (intel_dvo->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode;
-		mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+		mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
 			return 1;
 		}
 	}
+
 	return 0;
 }
 
-static void intel_dvo_destroy (struct drm_connector *connector)
+static void intel_dvo_destroy(struct drm_connector *connector)
 {
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
@@ -277,28 +285,20 @@
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
 
-	if (dvo) {
-		if (dvo->dev_ops->destroy)
-			dvo->dev_ops->destroy(dvo);
-		if (dvo->panel_fixed_mode)
-			kfree(dvo->panel_fixed_mode);
-	}
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	if (intel_dvo->dev.dev_ops->destroy)
+		intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+	kfree(intel_dvo->panel_fixed_mode);
+
+	intel_encoder_destroy(encoder);
 }
 
 static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
 	.destroy = intel_dvo_enc_destroy,
 };
 
-
 /**
  * Attempts to get a fixed panel timing for LVDS (currently only the i830).
  *
@@ -306,15 +306,13 @@
  * chip being on DVOB/C and having multiple pipes.
  */
 static struct drm_display_mode *
-intel_dvo_get_current_mode (struct drm_connector *connector)
+intel_dvo_get_current_mode(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-	uint32_t dvo_reg = dvo->dvo_reg;
-	uint32_t dvo_val = I915_READ(dvo_reg);
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
 	struct drm_display_mode *mode = NULL;
 
 	/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -327,7 +325,6 @@
 		crtc = intel_get_crtc_from_pipe(dev, pipe);
 		if (crtc) {
 			mode = intel_crtc_mode_get(dev, crtc);
-
 			if (mode) {
 				mode->type |= DRM_MODE_TYPE_PREFERRED;
 				if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -337,28 +334,32 @@
 			}
 		}
 	}
+
 	return mode;
 }
 
 void intel_dvo_init(struct drm_device *dev)
 {
 	struct intel_encoder *intel_encoder;
+	struct intel_dvo *intel_dvo;
 	struct intel_connector *intel_connector;
-	struct intel_dvo_device *dvo;
 	struct i2c_adapter *i2cbus = NULL;
 	int ret = 0;
 	int i;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
-	intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
-	if (!intel_encoder)
+
+	intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+	if (!intel_dvo)
 		return;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_dvo);
 		return;
 	}
 
+	intel_encoder = &intel_dvo->base;
+
 	/* Set up the DDC bus */
 	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
 	if (!intel_encoder->ddc_bus)
@@ -367,10 +368,9 @@
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
 		struct drm_connector *connector = &intel_connector->base;
+		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
 		int gpio;
 
-		dvo = &intel_dvo_devices[i];
-
 		/* Allow the I2C driver info to specify the GPIO to be used in
 		 * special cases, but otherwise default to what's defined
 		 * in the spec.
@@ -393,11 +393,8 @@
 			continue;
 		}
 
-		if (dvo->dev_ops!= NULL)
-			ret = dvo->dev_ops->init(dvo, i2cbus);
-		else
-			ret = false;
-
+		intel_dvo->dev = *dvo;
+		ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
 		if (!ret)
 			continue;
 
@@ -429,9 +426,6 @@
 		connector->interlace_allowed = false;
 		connector->doublescan_allowed = false;
 
-		intel_encoder->dev_priv = dvo;
-		intel_encoder->i2c_bus = i2cbus;
-
 		drm_encoder_init(dev, &intel_encoder->enc,
 				 &intel_dvo_enc_funcs, encoder_type);
 		drm_encoder_helper_add(&intel_encoder->enc,
@@ -447,9 +441,9 @@
 			 * headers, likely), so for now, just get the current
 			 * mode being output through DVO.
 			 */
-			dvo->panel_fixed_mode =
+			intel_dvo->panel_fixed_mode =
 				intel_dvo_get_current_mode(connector);
-			dvo->panel_wants_dither = true;
+			intel_dvo->panel_wants_dither = true;
 		}
 
 		drm_sysfs_connector_add(connector);
@@ -461,6 +455,6 @@
 	if (i2cbus != NULL)
 		intel_i2c_destroy(i2cbus);
 free_intel:
-	kfree(intel_encoder);
+	kfree(intel_dvo);
 	kfree(intel_connector);
 }
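
The intel_dvo conversion above is representative of the whole series: per-device state no longer hangs off intel_encoder->dev_priv but instead embeds struct intel_encoder as its first member, and enc_to_intel_dvo() recovers the wrapper with container_of(). A minimal standalone sketch of that pattern, using hypothetical names rather than the driver's real types:

#include <stddef.h>

/* Simplified container_of(): recover the enclosing struct from a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder {		/* stands in for struct intel_encoder */
	int ddc_bus;
};

struct dvo_encoder {		/* stands in for struct intel_dvo */
	struct base_encoder base;
	int panel_wants_dither;
};

/* One allocation now covers both structs, so error paths free only the wrapper. */
static struct dvo_encoder *to_dvo(struct base_encoder *enc)
{
	return container_of(enc, struct dvo_encoder, base);
}
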
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 197887ed..ccd4c97 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,11 +37,17 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-struct intel_hdmi_priv {
+struct intel_hdmi {
+	struct intel_encoder base;
 	u32 sdvox_reg;
 	bool has_hdmi_sink;
 };
 
+static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+}
+
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -50,8 +56,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 sdvox;
 
 	sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
@@ -60,7 +65,7 @@
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 
-	if (hdmi_priv->has_hdmi_sink) {
+	if (intel_hdmi->has_hdmi_sink) {
 		sdvox |= SDVO_AUDIO_ENABLE;
 		if (HAS_PCH_CPT(dev))
 			sdvox |= HDMI_MODE_SELECT;
@@ -73,26 +78,25 @@
 			sdvox |= SDVO_PIPE_B_SELECT;
 	}
 
-	I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
-	POSTING_READ(hdmi_priv->sdvox_reg);
+	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+	POSTING_READ(intel_hdmi->sdvox_reg);
 }
 
 static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
-	temp = I915_READ(hdmi_priv->sdvox_reg);
+	temp = I915_READ(intel_hdmi->sdvox_reg);
 
 	/* HW workaround: the enable bit needs toggling off and on for 12bpc,
 	 * but we do this unconditionally since it has proven more stable in testing.
 	 */
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
-		POSTING_READ(hdmi_priv->sdvox_reg);
+		I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->sdvox_reg);
 	}
 
 	if (mode != DRM_MODE_DPMS_ON) {
@@ -101,15 +105,15 @@
 		temp |= SDVO_ENABLE;
 	}
 
-	I915_WRITE(hdmi_priv->sdvox_reg, temp);
-	POSTING_READ(hdmi_priv->sdvox_reg);
+	I915_WRITE(intel_hdmi->sdvox_reg, temp);
+	POSTING_READ(intel_hdmi->sdvox_reg);
 
 	/* HW workaround: write this twice to avoid an issue where the first
 	 * write may get masked.
 	 */
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(hdmi_priv->sdvox_reg, temp);
-		POSTING_READ(hdmi_priv->sdvox_reg);
+		I915_WRITE(intel_hdmi->sdvox_reg, temp);
+		POSTING_READ(intel_hdmi->sdvox_reg);
 	}
 }
 
@@ -138,19 +142,17 @@
 intel_hdmi_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
 
-	hdmi_priv->has_hdmi_sink = false;
-	edid = drm_get_edid(connector,
-			    intel_encoder->ddc_bus);
+	intel_hdmi->has_hdmi_sink = false;
+	edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
 
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
-			hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
 		}
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
@@ -162,13 +164,13 @@
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
 	/* We should parse the EDID data and find out if it's an HDMI sink so
 	 * we can send audio to it.
 	 */
 
-	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -199,18 +201,8 @@
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
-	.destroy = intel_hdmi_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -219,21 +211,19 @@
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
-	struct intel_hdmi_priv *hdmi_priv;
+	struct intel_hdmi *intel_hdmi;
 
-	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
-			       sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
-	if (!intel_encoder)
+	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
+	if (!intel_hdmi)
 		return;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_hdmi);
 		return;
 	}
 
-	hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
-
+	intel_encoder = &intel_hdmi->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
@@ -274,8 +264,7 @@
 	if (!intel_encoder->ddc_bus)
 		goto err_connector;
 
-	hdmi_priv->sdvox_reg = sdvox_reg;
-	intel_encoder->dev_priv = hdmi_priv;
+	intel_hdmi->sdvox_reg = sdvox_reg;
 
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
@@ -298,7 +287,7 @@
 
 err_connector:
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(intel_hdmi);
 	kfree(intel_connector);
 
 	return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0a2e600..b819c10 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,12 +41,18 @@
 #include <linux/acpi.h>
 
 /* Private structure for the integrated LVDS support */
-struct intel_lvds_priv {
+struct intel_lvds {
+	struct intel_encoder base;
 	int fitting_mode;
 	u32 pfit_control;
 	u32 pfit_pgm_ratios;
 };
 
+static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
+}
+
 /**
  * Sets the backlight level.
  *
@@ -90,7 +96,7 @@
 static void intel_lvds_set_power(struct drm_device *dev, bool on)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_status, ctl_reg, status_reg, lvds_reg;
+	u32 ctl_reg, status_reg, lvds_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
@@ -108,9 +114,8 @@
 
 		I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
 			   POWER_TARGET_ON);
-		do {
-			pp_status = I915_READ(status_reg);
-		} while ((pp_status & PP_ON) == 0);
+		if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
+			DRM_ERROR("timed out waiting to enable LVDS pipe");
 
 		intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
 	} else {
@@ -118,9 +123,8 @@
 
 		I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
 			   ~POWER_TARGET_ON);
-		do {
-			pp_status = I915_READ(status_reg);
-		} while (pp_status & PP_ON);
+		if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
+			DRM_ERROR("timed out waiting for LVDS pipe to turn off");
 
 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
 		POSTING_READ(lvds_reg);
@@ -219,9 +223,8 @@
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
 	struct drm_encoder *tmp_encoder;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 
 	/* Should never happen!! */
@@ -241,26 +244,20 @@
 	/* If we don't have a panel mode, there is nothing we can do */
 	if (dev_priv->panel_fixed_mode == NULL)
 		return true;
+
 	/*
 	 * We have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode.  The CRTC will be set up for this mode,
 	 * with the panel scaling set up to source from the H/VDisplay
 	 * of the original mode.
 	 */
-	adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
-	adjusted_mode->hsync_start =
-		dev_priv->panel_fixed_mode->hsync_start;
-	adjusted_mode->hsync_end =
-		dev_priv->panel_fixed_mode->hsync_end;
-	adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
-	adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
-	adjusted_mode->vsync_start =
-		dev_priv->panel_fixed_mode->vsync_start;
-	adjusted_mode->vsync_end =
-		dev_priv->panel_fixed_mode->vsync_end;
-	adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
-	adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
-	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+	intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+					mode, adjusted_mode);
+		return true;
+	}
 
 	/* Make sure pre-965s set dither correctly */
 	if (!IS_I965G(dev)) {
@@ -273,10 +270,6 @@
 	    adjusted_mode->vdisplay == mode->vdisplay)
 		goto out;
 
-	/* full screen scale for now */
-	if (HAS_PCH_SPLIT(dev))
-		goto out;
-
 	/* 965+ wants fuzzy fitting */
 	if (IS_I965G(dev))
 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -288,12 +281,10 @@
 	 * to register description and PRM.
 	 * Change the value here to see the borders for debugging
 	 */
-	if (!HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(BCLRPAT_A, 0);
-		I915_WRITE(BCLRPAT_B, 0);
-	}
+	I915_WRITE(BCLRPAT_A, 0);
+	I915_WRITE(BCLRPAT_B, 0);
 
-	switch (lvds_priv->fitting_mode) {
+	switch (intel_lvds->fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
 		/*
 		 * For centered modes, we have to calculate border widths &
@@ -378,8 +369,8 @@
 	}
 
 out:
-	lvds_priv->pfit_control = pfit_control;
-	lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
+	intel_lvds->pfit_control = pfit_control;
+	intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
 	dev_priv->lvds_border_bits = border;
 
 	/*
@@ -427,8 +418,7 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
 
 	/*
 	 * The LVDS pin pair will already have been turned on in the
@@ -444,8 +434,8 @@
 	 * screen.  Should be enabled before the pipe is enabled, according to
 	 * register description and PRM.
 	 */
-	I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
+	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
 }
 
 /**
@@ -600,18 +590,17 @@
 				connector->encoder) {
 		struct drm_crtc *crtc = connector->encoder->crtc;
 		struct drm_encoder *encoder = connector->encoder;
-		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-		struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+		struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
 
 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return 0;
 		}
-		if (lvds_priv->fitting_mode == value) {
+		if (intel_lvds->fitting_mode == value) {
 			/* the LVDS scaling property is not changed */
 			return 0;
 		}
-		lvds_priv->fitting_mode = value;
+		intel_lvds->fitting_mode = value;
 		if (crtc && crtc->enabled) {
 			/*
 			 * If the CRTC is enabled, the display will be changed
@@ -647,19 +636,8 @@
 	.destroy = intel_lvds_destroy,
 };
 
-
-static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
-	.destroy = intel_lvds_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -843,13 +821,13 @@
 void intel_lvds_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds *intel_lvds;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
 	struct drm_crtc *crtc;
-	struct intel_lvds_priv *lvds_priv;
 	u32 lvds;
 	int pipe, gpio = GPIOC;
 
@@ -872,20 +850,20 @@
 		gpio = PCH_GPIOC;
 	}
 
-	intel_encoder = kzalloc(sizeof(struct intel_encoder) +
-				sizeof(struct intel_lvds_priv), GFP_KERNEL);
-	if (!intel_encoder) {
+	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+	if (!intel_lvds) {
 		return;
 	}
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_lvds);
 		return;
 	}
 
-	connector = &intel_connector->base;
+	intel_encoder = &intel_lvds->base;
 	encoder = &intel_encoder->enc;
+	connector = &intel_connector->base;
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
 
@@ -905,8 +883,6 @@
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
-	intel_encoder->dev_priv = lvds_priv;
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
 	/*
@@ -916,7 +892,7 @@
 	drm_connector_attach_property(&intel_connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
-	lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
+	intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
 	/*
 	 * LVDS discovery:
 	 * 1) check for EDID on DDC
@@ -1024,6 +1000,6 @@
 		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	kfree(intel_lvds);
 	kfree(intel_connector);
 }
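
The LVDS power sequencing above replaces unbounded do/while polling of the panel-power status register with a wait_for(condition, timeout, ...) macro that gives up and logs a DRM_ERROR after the timeout. A rough user-space sketch of the same idea (the helper name and polling interval are illustrative, not the i915 macro):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll cond(ctx) about once per millisecond for up to timeout_ms.
 * Returns true as soon as the condition holds, false on timeout. */
static bool wait_for_cond(bool (*cond)(void *ctx), void *ctx,
			  unsigned int timeout_ms)
{
	unsigned int waited;

	for (waited = 0; waited <= timeout_ms; waited++) {
		if (cond(ctx))
			return true;
		usleep(1000);
	}
	return false;
}

/* Usage, mirroring the PP_ON wait:
 *	if (!wait_for_cond(pp_on_set, dev, 1000))
 *		fprintf(stderr, "timed out waiting to enable LVDS pipe\n");
 */
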
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d39aea24..4f00390 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1367,7 +1367,8 @@
 		overlay->flip_addr = overlay->reg_bo->gtt_offset;
 	} else {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
-				I915_GEM_PHYS_OVERLAY_REGS);
+						  I915_GEM_PHYS_OVERLAY_REGS,
+						  0);
                 if (ret) {
                         DRM_ERROR("failed to attach phys overlay regs\n");
                         goto out_free_bo;
@@ -1416,3 +1417,99 @@
 		kfree(dev_priv->overlay);
 	}
 }
+
+struct intel_overlay_error_state {
+	struct overlay_registers regs;
+	unsigned long base;
+	u32 dovsta;
+	u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay = dev_priv->overlay;
+	struct intel_overlay_error_state *error;
+	struct overlay_registers __iomem *regs;
+
+	if (!overlay || !overlay->active)
+		return NULL;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	error->dovsta = I915_READ(DOVSTA);
+	error->isr = I915_READ(ISR);
+	if (OVERLAY_NONPHYSICAL(overlay->dev))
+		error->base = (long) overlay->reg_bo->gtt_offset;
+	else
+		error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs)
+		goto err;
+
+	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	return error;
+
+err:
+	kfree(error);
+	return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+{
+	seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+		   error->dovsta, error->isr);
+	seq_printf(m, "  Register file at 0x%08lx:\n",
+		   error->base);
+
+#define P(x) seq_printf(m, "    " #x ":	0x%08x\n", error->regs.x)
+	P(OBUF_0Y);
+	P(OBUF_1Y);
+	P(OBUF_0U);
+	P(OBUF_0V);
+	P(OBUF_1U);
+	P(OBUF_1V);
+	P(OSTRIDE);
+	P(YRGB_VPH);
+	P(UV_VPH);
+	P(HORZ_PH);
+	P(INIT_PHS);
+	P(DWINPOS);
+	P(DWINSZ);
+	P(SWIDTH);
+	P(SWIDTHSW);
+	P(SHEIGHT);
+	P(YRGBSCALE);
+	P(UVSCALE);
+	P(OCLRC0);
+	P(OCLRC1);
+	P(DCLRKV);
+	P(DCLRKM);
+	P(SCLRKVH);
+	P(SCLRKVL);
+	P(SCLRKEN);
+	P(OCONFIG);
+	P(OCMD);
+	P(OSTART_0Y);
+	P(OSTART_1Y);
+	P(OSTART_0U);
+	P(OSTART_0V);
+	P(OSTART_1U);
+	P(OSTART_1V);
+	P(OTILEOFF_0Y);
+	P(OTILEOFF_1Y);
+	P(OTILEOFF_0U);
+	P(OTILEOFF_0V);
+	P(OTILEOFF_1U);
+	P(OTILEOFF_1V);
+	P(FASTHSCALE);
+	P(UVSCALEV);
+#undef P
+}
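
The overlay error-state dumper added above uses the preprocessor's stringizing operator so that P(OCMD) prints both the register name and the captured value without repeating either. A small standalone sketch of the trick, with a hypothetical snapshot struct:

#include <stdio.h>

struct regs_snapshot {
	unsigned int OCMD;
	unsigned int OCONFIG;
};

static void print_snapshot(const struct regs_snapshot *snap)
{
	/* #x becomes the field name as a string; snap->x reads its value. */
#define P(x) printf("    " #x ": 0x%08x\n", snap->x)
	P(OCMD);
	P(OCONFIG);
#undef P
}

int main(void)
{
	struct regs_snapshot snap = { .OCMD = 0x00000001, .OCONFIG = 0x00000002 };

	print_snapshot(&snap);
	return 0;
}
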
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 0000000..e7f5299
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ *      Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "intel_drv.h"
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+		       struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+			int fitting_mode,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int x, y, width, height;
+
+	x = y = width = height = 0;
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->hdisplay == mode->hdisplay &&
+	    adjusted_mode->vdisplay == mode->vdisplay)
+		goto done;
+
+	switch (fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		width = mode->hdisplay;
+		height = mode->vdisplay;
+		x = (adjusted_mode->hdisplay - width + 1)/2;
+		y = (adjusted_mode->vdisplay - height + 1)/2;
+		break;
+
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		{
+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			if (scaled_width > scaled_height) { /* pillar */
+				width = scaled_height / mode->vdisplay;
+				x = (adjusted_mode->hdisplay - width + 1) / 2;
+				y = 0;
+				height = adjusted_mode->vdisplay;
+			} else if (scaled_width < scaled_height) { /* letter */
+				height = scaled_width / mode->hdisplay;
+				y = (adjusted_mode->vdisplay - height + 1) / 2;
+				x = 0;
+				width = adjusted_mode->hdisplay;
+			} else {
+				x = y = 0;
+				width = adjusted_mode->hdisplay;
+				height = adjusted_mode->vdisplay;
+			}
+		}
+		break;
+
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN:
+		x = y = 0;
+		width = adjusted_mode->hdisplay;
+		height = adjusted_mode->vdisplay;
+		break;
+	}
+
+done:
+	dev_priv->pch_pf_pos = (x << 16) | y;
+	dev_priv->pch_pf_size = (width << 16) | height;
+}
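
The DRM_MODE_SCALE_ASPECT branch above decides between pillarboxing and letterboxing by cross-multiplying the two aspect ratios (panel_w * src_h versus src_w * panel_h) and then centres the scaled image. A worked standalone sketch of the same arithmetic, with hypothetical helper names:

#include <stdio.h>
#include <stdint.h>

/* Fit a source mode into the panel's fixed mode, preserving aspect ratio.
 * Mirrors the DRM_MODE_SCALE_ASPECT branch of intel_pch_panel_fitting(). */
static void fit_aspect(int panel_w, int panel_h, int src_w, int src_h,
		       int *x, int *y, int *w, int *h)
{
	uint32_t scaled_w = (uint32_t)panel_w * src_h;
	uint32_t scaled_h = (uint32_t)src_w * panel_h;

	if (scaled_w > scaled_h) {		/* pillarbox: bars left/right */
		*w = scaled_h / src_h;
		*x = (panel_w - *w + 1) / 2;
		*y = 0;
		*h = panel_h;
	} else if (scaled_w < scaled_h) {	/* letterbox: bars top/bottom */
		*h = scaled_w / src_w;
		*y = (panel_h - *h + 1) / 2;
		*x = 0;
		*w = panel_w;
	} else {				/* aspect ratios already match */
		*x = *y = 0;
		*w = panel_w;
		*h = panel_h;
	}
}

int main(void)
{
	int x, y, w, h;

	/* A 1024x768 source on a 1366x768 panel ends up pillarboxed at 1024x768. */
	fit_aspect(1366, 768, 1024, 768, &x, &y, &w, &h);
	printf("pos %d,%d size %dx%d\n", x, y, w, h);
	return 0;
}
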
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8..51e9c9e7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,18 +33,35 @@
 #include "i915_drm.h"
 #include "i915_trace.h"
 
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 seqno;
+
+	seqno = dev_priv->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++dev_priv->next_seqno == 0)
+		dev_priv->next_seqno = 1;
+
+	return seqno;
+}
+
 static void
 render_ring_flush(struct drm_device *dev,
 		struct intel_ring_buffer *ring,
 		u32	invalidate_domains,
 		u32	flush_domains)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 cmd;
+
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
-	u32 cmd;
-	trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
 				     invalidate_domains, flush_domains);
 
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -233,9 +250,10 @@
 		struct drm_file *file_priv,
 		u32 flush_domains)
 {
-	u32 seqno;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	seqno = intel_ring_get_seqno(dev, ring);
+	u32 seqno;
+
+	seqno = i915_gem_get_seqno(dev);
 
 	if (IS_GEN6(dev)) {
 		BEGIN_LP_RING(6);
@@ -405,7 +423,9 @@
 		u32 flush_domains)
 {
 	u32 seqno;
-	seqno = intel_ring_get_seqno(dev, ring);
+
+	seqno = i915_gem_get_seqno(dev);
+
 	intel_ring_begin(dev, ring, 4);
 	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(dev, ring,
@@ -479,7 +499,7 @@
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
 	count = nbox ? nbox : 1;
 
@@ -515,7 +535,16 @@
 		intel_ring_advance(dev, ring);
 	}
 
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		intel_ring_begin(dev, ring, 2);
+		intel_ring_emit(dev, ring, MI_FLUSH |
+				MI_NO_WRITE_FLUSH |
+				MI_INVALIDATE_ISP);
+		intel_ring_emit(dev, ring, MI_NOOP);
+		intel_ring_advance(dev, ring);
+	}
 	/* XXX breadcrumb */
+
 	return 0;
 }
 
@@ -588,9 +617,10 @@
 int intel_init_ring_buffer(struct drm_device *dev,
 		struct intel_ring_buffer *ring)
 {
-	int ret;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_gem_object *obj;
+	int ret;
+
 	ring->dev = dev;
 
 	if (I915_NEED_GFX_HWS(dev)) {
@@ -603,16 +633,14 @@
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
-		goto cleanup;
+		goto err_hws;
 	}
 
 	ring->gem_object = obj;
 
 	ret = i915_gem_object_pin(obj, ring->alignment);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		goto cleanup;
-	}
+	if (ret)
+		goto err_unref;
 
 	obj_priv = to_intel_bo(obj);
 	ring->map.size = ring->size;
@@ -624,18 +652,14 @@
 	drm_core_ioremap_wc(&ring->map, dev);
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
 		ret = -EINVAL;
-		goto cleanup;
+		goto err_unpin;
 	}
 
 	ring->virtual_start = ring->map.handle;
 	ret = ring->init(dev, ring);
-	if (ret != 0) {
-		intel_cleanup_ring_buffer(dev, ring);
-		return ret;
-	}
+	if (ret)
+		goto err_unmap;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kernel_lost_context(dev);
@@ -649,7 +673,15 @@
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	return ret;
-cleanup:
+
+err_unmap:
+	drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(obj);
+	ring->gem_object = NULL;
+err_hws:
 	cleanup_status_page(dev, ring);
 	return ret;
 }
@@ -682,9 +714,11 @@
 	}
 
 	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 4;
-	while (rem--)
+	rem /= 8;
+	while (rem--) {
 		*virt++ = MI_NOOP;
+		*virt++ = MI_NOOP;
+	}
 
 	ring->tail = 0;
 	ring->space = ring->head - 8;
@@ -729,21 +763,14 @@
 		intel_wrap_ring_buffer(dev, ring);
 	if (unlikely(ring->space < n))
 		intel_wait_ring_buffer(dev, ring, n);
-}
 
-void intel_ring_emit(struct drm_device *dev,
-		struct intel_ring_buffer *ring, unsigned int data)
-{
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
-	ring->tail += 4;
-	ring->tail &= ring->size - 1;
-	ring->space -= 4;
+	ring->space -= n;
 }
 
 void intel_ring_advance(struct drm_device *dev,
 		struct intel_ring_buffer *ring)
 {
+	ring->tail &= ring->size - 1;
 	ring->advance_ring(dev, ring);
 }
 
@@ -762,18 +789,6 @@
 	intel_ring_advance(dev, ring);
 }
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	u32 seqno;
-	seqno = ring->next_seqno;
-
-	/* reserve 0 for non-seqno */
-	if (++ring->next_seqno == 0)
-		ring->next_seqno = 1;
-	return seqno;
-}
-
 struct intel_ring_buffer render_ring = {
 	.name			= "render ring",
 	.regs                   = {
@@ -791,7 +806,6 @@
 	.head			= 0,
 	.tail			= 0,
 	.space			= 0,
-	.next_seqno		= 1,
 	.user_irq_refcount	= 0,
 	.irq_gem_seqno		= 0,
 	.waiting_gem_seqno	= 0,
@@ -830,7 +844,6 @@
 	.head			= 0,
 	.tail			= 0,
 	.space			= 0,
-	.next_seqno		= 1,
 	.user_irq_refcount	= 0,
 	.irq_gem_seqno		= 0,
 	.waiting_gem_seqno	= 0,
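
With the change above the sequence-number counter lives in the device rather than in each ring, and zero stays reserved to mean "no seqno"; the increment simply skips it on 32-bit wraparound. A tiny standalone sketch of that allocation rule:

#include <stdint.h>

struct seqno_state {
	uint32_t next_seqno;	/* initialised to 1; 0 means "no seqno" */
};

static uint32_t get_seqno(struct seqno_state *s)
{
	uint32_t seqno = s->next_seqno;

	/* reserve 0 for "not a sequence number": skip it on wraparound */
	if (++s->next_seqno == 0)
		s->next_seqno = 1;

	return seqno;
}
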
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d5568d3..525e7d3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,7 +26,6 @@
 	unsigned int	head;
 	unsigned int	tail;
 	unsigned int	space;
-	u32		next_seqno;
 	struct intel_hw_status_page status_page;
 
 	u32		irq_gem_seqno;		/* last seq seem at irq time */
@@ -106,8 +105,16 @@
 		struct intel_ring_buffer *ring);
 void intel_ring_begin(struct drm_device *dev,
 		struct intel_ring_buffer *ring, int n);
-void intel_ring_emit(struct drm_device *dev,
-		struct intel_ring_buffer *ring, u32 data);
+
+static inline void intel_ring_emit(struct drm_device *dev,
+				   struct intel_ring_buffer *ring,
+				   unsigned int data)
+{
+	unsigned int *virt = ring->virtual_start + ring->tail;
+	*virt = data;
+	ring->tail += 4;
+}
+
 void intel_fill_struct(struct drm_device *dev,
 		struct intel_ring_buffer *ring,
 		void *data,
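
intel_ring_emit() becomes a trivial inline that only stores the dword and bumps the tail; the power-of-two wrap mask is now applied once per batch in intel_ring_advance(), and the space accounting happens up front in intel_ring_begin(). A toy sketch of that split, with hypothetical types and assuming the caller has already reserved enough space so emits never run past the end of the ring:

#include <stdint.h>

struct ring {
	uint32_t *virtual_start;	/* mapped ring memory */
	unsigned int size;		/* in bytes, must be a power of two */
	unsigned int tail;		/* byte offset of the next write */
};

/* Store one dword and bump the tail; no masking here, kept cheap. */
static inline void ring_emit(struct ring *ring, uint32_t data)
{
	ring->virtual_start[ring->tail / 4] = data;
	ring->tail += 4;
}

/* Wrap the tail once per batch; a hardware doorbell write would follow. */
static inline void ring_advance(struct ring *ring)
{
	ring->tail &= ring->size - 1;
}
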
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d9d4d51..093e914 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
 #include "drmP.h"
 #include "drm.h"
 #include "drm_crtc.h"
-#include "intel_drv.h"
 #include "drm_edid.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
@@ -47,9 +47,10 @@
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
 
-static char *tv_format_names[] = {
+static const char *tv_format_names[] = {
 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
 	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
 	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
@@ -61,7 +62,9 @@
 
 #define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
 
-struct intel_sdvo_priv {
+struct intel_sdvo {
+	struct intel_encoder base;
+
 	u8 slave_addr;
 
 	/* Register for the SDVO device: SDVOB or SDVOC */
@@ -95,7 +98,7 @@
 	bool is_tv;
 
 	/* This is for current tv format name */
-	char *tv_format_name;
+	int tv_format_index;
 
 	/**
 	 * This is set if we treat the device as HDMI, instead of DVI.
@@ -132,37 +135,40 @@
 };
 
 struct intel_sdvo_connector {
+	struct intel_connector base;
+
 	/* Mark the type of connector */
 	uint16_t output_flag;
 
 	/* This contains all current supported TV format */
-	char *tv_format_supported[TV_FORMAT_NUM];
+	u8 tv_format_supported[TV_FORMAT_NUM];
 	int   format_supported_num;
-	struct drm_property *tv_format_property;
-	struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
-	/**
-	 * Returned SDTV resolutions allowed for the current format, if the
-	 * device reported it.
-	 */
-	struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+	struct drm_property *tv_format;
 
 	/* add the property for the SDVO-TV */
-	struct drm_property *left_property;
-	struct drm_property *right_property;
-	struct drm_property *top_property;
-	struct drm_property *bottom_property;
-	struct drm_property *hpos_property;
-	struct drm_property *vpos_property;
+	struct drm_property *left;
+	struct drm_property *right;
+	struct drm_property *top;
+	struct drm_property *bottom;
+	struct drm_property *hpos;
+	struct drm_property *vpos;
+	struct drm_property *contrast;
+	struct drm_property *saturation;
+	struct drm_property *hue;
+	struct drm_property *sharpness;
+	struct drm_property *flicker_filter;
+	struct drm_property *flicker_filter_adaptive;
+	struct drm_property *flicker_filter_2d;
+	struct drm_property *tv_chroma_filter;
+	struct drm_property *tv_luma_filter;
+	struct drm_property *dot_crawl;
 
 	/* add the property for the SDVO-TV/LVDS */
-	struct drm_property *brightness_property;
-	struct drm_property *contrast_property;
-	struct drm_property *saturation_property;
-	struct drm_property *hue_property;
+	struct drm_property *brightness;
 
 	/* Add variable to record current setting for the above property */
 	u32	left_margin, right_margin, top_margin, bottom_margin;
+
 	/* this is to get the range of margin.*/
 	u32	max_hscan,  max_vscan;
 	u32	max_hpos, cur_hpos;
@@ -171,36 +177,54 @@
 	u32	cur_contrast,	max_contrast;
 	u32	cur_saturation, max_saturation;
 	u32	cur_hue,	max_hue;
+	u32	cur_sharpness,	max_sharpness;
+	u32	cur_flicker_filter,		max_flicker_filter;
+	u32	cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
+	u32	cur_flicker_filter_2d,		max_flicker_filter_2d;
+	u32	cur_tv_chroma_filter,	max_tv_chroma_filter;
+	u32	cur_tv_luma_filter,	max_tv_luma_filter;
+	u32	cur_dot_crawl,	max_dot_crawl;
 };
 
+static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+	return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
-			uint16_t flags);
-static void
-intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
-static void
-intel_sdvo_create_enhance_property(struct drm_connector *connector);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+			      struct intel_sdvo_connector *intel_sdvo_connector,
+			      int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+				   struct intel_sdvo_connector *intel_sdvo_connector);
 
 /**
  * Writes the SDVOB or SDVOC with the given value, but always writes both
  * SDVOB and SDVOC to work around apparent hardware issues (according to
  * comments in the BIOS).
  */
-static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
-	struct drm_device *dev = intel_encoder->enc.dev;
+	struct drm_device *dev = intel_sdvo->base.enc.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo_priv   *sdvo_priv = intel_encoder->dev_priv;
 	u32 bval = val, cval = val;
 	int i;
 
-	if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
-		I915_WRITE(sdvo_priv->sdvo_reg, val);
-		I915_READ(sdvo_priv->sdvo_reg);
+	if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+		I915_WRITE(intel_sdvo->sdvo_reg, val);
+		I915_READ(intel_sdvo->sdvo_reg);
 		return;
 	}
 
-	if (sdvo_priv->sdvo_reg == SDVOB) {
+	if (intel_sdvo->sdvo_reg == SDVOB) {
 		cval = I915_READ(SDVOC);
 	} else {
 		bval = I915_READ(SDVOB);
@@ -219,33 +243,27 @@
 	}
 }
 
-static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
-				 u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	u8 out_buf[2];
+	u8 out_buf[2] = { addr, 0 };
 	u8 buf[2];
-	int ret;
-
 	struct i2c_msg msgs[] = {
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = 0,
 			.len = 1,
 			.buf = out_buf,
 		},
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = I2C_M_RD,
 			.len = 1,
 			.buf = buf,
 		}
 	};
+	int ret;
 
-	out_buf[0] = addr;
-	out_buf[1] = 0;
-
-	if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
+	if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
 	{
 		*ch = buf[0];
 		return true;
@@ -255,35 +273,26 @@
 	return false;
 }
 
-static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
-				  u8 ch)
+static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	u8 out_buf[2];
+	u8 out_buf[2] = { addr, ch };
 	struct i2c_msg msgs[] = {
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = 0,
 			.len = 2,
 			.buf = out_buf,
 		}
 	};
 
-	out_buf[0] = addr;
-	out_buf[1] = ch;
-
-	if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
-	{
-		return true;
-	}
-	return false;
+	return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
 }
 
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 /** Mapping of command numbers to names, for debug output */
 static const struct _sdvo_cmd_name {
 	u8 cmd;
-	char *name;
+	const char *name;
 } sdvo_cmd_names[] = {
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -328,13 +337,14 @@
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
     /* Add the op code for SDVO enhancements */
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -353,6 +363,27 @@
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
     /* HDMI op code */
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -377,17 +408,15 @@
 };
 
 #define IS_SDVOB(reg)	(reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(encoder)   ((struct intel_sdvo_priv *) (encoder)->dev_priv)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
 
-static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
-				   void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+				   const void *args, int args_len)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int i;
 
 	DRM_DEBUG_KMS("%s: W: %02X ",
-				SDVO_NAME(sdvo_priv), cmd);
+				SDVO_NAME(intel_sdvo), cmd);
 	for (i = 0; i < args_len; i++)
 		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
 	for (; i < 8; i++)
@@ -403,19 +432,20 @@
 	DRM_LOG_KMS("\n");
 }
 
-static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
-				 void *args, int args_len)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+				 const void *args, int args_len)
 {
 	int i;
 
-	intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
+	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
 
 	for (i = 0; i < args_len; i++) {
-		intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
-				      ((u8*)args)[i]);
+		if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
+					   ((u8*)args)[i]))
+			return false;
 	}
 
-	intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
+	return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
 }
 
 static const char *cmd_status_names[] = {
@@ -428,14 +458,13 @@
 	"Scaling not supported"
 };
 
-static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
+static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
 				      void *response, int response_len,
 				      u8 status)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int i;
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 	for (i = 0; i < response_len; i++)
 		DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
 	for (; i < 8; i++)
@@ -447,8 +476,8 @@
 	DRM_LOG_KMS("\n");
 }
 
-static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
-				   void *response, int response_len)
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+				     void *response, int response_len)
 {
 	int i;
 	u8 status;
@@ -457,24 +486,26 @@
 	while (retry--) {
 		/* Read the command response */
 		for (i = 0; i < response_len; i++) {
-			intel_sdvo_read_byte(intel_encoder,
-					     SDVO_I2C_RETURN_0 + i,
-					     &((u8 *)response)[i]);
+			if (!intel_sdvo_read_byte(intel_sdvo,
+						  SDVO_I2C_RETURN_0 + i,
+						  &((u8 *)response)[i]))
+				return false;
 		}
 
 		/* read the return status */
-		intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
-				     &status);
+		if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+					  &status))
+			return false;
 
-		intel_sdvo_debug_response(intel_encoder, response, response_len,
+		intel_sdvo_debug_response(intel_sdvo, response, response_len,
 					  status);
 		if (status != SDVO_CMD_STATUS_PENDING)
-			return status;
+			break;
 
 		mdelay(50);
 	}
 
-	return status;
+	return status == SDVO_CMD_STATUS_SUCCESS;
 }
 
 static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -494,37 +525,36 @@
  * another I2C transaction after issuing the DDC bus switch, it will be
  * switched to the internal SDVO register.
  */
-static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
+static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 					      u8 target)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
 	struct i2c_msg msgs[] = {
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = 0,
 			.len = 2,
 			.buf = out_buf,
 		},
 		/* the following two are to read the response */
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = 0,
 			.len = 1,
 			.buf = cmd_buf,
 		},
 		{
-			.addr = sdvo_priv->slave_addr >> 1,
+			.addr = intel_sdvo->slave_addr >> 1,
 			.flags = I2C_M_RD,
 			.len = 1,
 			.buf = ret_value,
 		},
 	};
 
-	intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+	intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
 					&target, 1);
 	/* write the DDC switch command argument */
-	intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
+	intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
 
 	out_buf[0] = SDVO_I2C_OPCODE;
 	out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -533,7 +563,7 @@
 	ret_value[0] = 0;
 	ret_value[1] = 0;
 
-	ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
+	ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
 	if (ret != 3) {
 		/* failure in I2C transfer */
 		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -547,23 +577,29 @@
 	return;
 }
 
-static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
 {
 	struct intel_sdvo_set_target_input_args targets = {0};
-	u8 status;
-
-	if (target_0 && target_1)
-		return SDVO_CMD_STATUS_NOTSUPP;
-
-	if (target_1)
-		targets.target_1 = 1;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
-			     sizeof(targets));
-
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_INPUT,
+				    &targets, sizeof(targets));
 }
 
 /**
@@ -572,14 +608,12 @@
  * This function is making an assumption about the layout of the response,
  * which should be checked against the docs.
  */
-static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
 {
 	struct intel_sdvo_get_trained_inputs_response response;
-	u8 status;
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+				  &response, sizeof(response)))
 		return false;
 
 	*input_1 = response.input0_trained;
@@ -587,21 +621,18 @@
 	return true;
 }
 
-static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
 					  u16 outputs)
 {
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
-			     sizeof(outputs));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ACTIVE_OUTPUTS,
+				    &outputs, sizeof(outputs));
 }
 
-static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
 					       int mode)
 {
-	u8 status, state = SDVO_ENCODER_STATE_ON;
+	u8 state = SDVO_ENCODER_STATE_ON;
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -618,88 +649,63 @@
 		break;
 	}
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
-			     sizeof(state));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
 }
 
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
 						   int *clock_min,
 						   int *clock_max)
 {
 	struct intel_sdvo_pixel_clock_range clocks;
-	u8 status;
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
-			     NULL, 0);
-
-	status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
-
-	if (status != SDVO_CMD_STATUS_SUCCESS)
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+				  &clocks, sizeof(clocks)))
 		return false;
 
 	/* Convert the values from units of 10 kHz to kHz. */
 	*clock_min = clocks.min * 10;
 	*clock_max = clocks.max * 10;
-
 	return true;
 }
 
-static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
 					 u16 outputs)
 {
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
-			     sizeof(outputs));
-
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_OUTPUT,
+				    &outputs, sizeof(outputs));
 }
 
-static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
 				  struct intel_sdvo_dtd *dtd)
 {
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return true;
+	return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
 }
 
-static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
 					 struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_set_timing(intel_encoder,
+	return intel_sdvo_set_timing(intel_sdvo,
 				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
 }
 
-static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
 					 struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_set_timing(intel_encoder,
+	return intel_sdvo_set_timing(intel_sdvo,
 				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
 }
 
 static bool
-intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 					 uint16_t clock,
 					 uint16_t width,
 					 uint16_t height)
 {
 	struct intel_sdvo_preferred_input_timing_args args;
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	uint8_t status;
 
 	memset(&args, 0, sizeof(args));
 	args.clock = clock;
@@ -707,59 +713,32 @@
 	args.height = height;
 	args.interlace = 0;
 
-	if (sdvo_priv->is_lvds &&
-	   (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
-	    sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
+	if (intel_sdvo->is_lvds &&
+	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
 		args.scaled = 1;
 
-	intel_sdvo_write_cmd(intel_encoder,
-			     SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
-			     &args, sizeof(args));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return true;
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+				    &args, sizeof(args));
 }
 
-static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 						  struct intel_sdvo_dtd *dtd)
 {
-	bool status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
-			     NULL, 0);
-
-	status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
-					  sizeof(dtd->part1));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
-			     NULL, 0);
-
-	status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
-					  sizeof(dtd->part2));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return false;
+	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+				    &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+				     &dtd->part2, sizeof(dtd->part2));
 }
 
-static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
 {
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return true;
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
 }
 
 static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
-					 struct drm_display_mode *mode)
+					 const struct drm_display_mode *mode)
 {
 	uint16_t width, height;
 	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -808,7 +787,7 @@
 }
 
 static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
-					 struct intel_sdvo_dtd *dtd)
+					 const struct intel_sdvo_dtd *dtd)
 {
 	mode->hdisplay = dtd->part1.h_active;
 	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -840,45 +819,33 @@
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
-static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
 				       struct intel_sdvo_encode *encode)
 {
-	uint8_t status;
+	if (intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPP_ENCODE,
+				  encode, sizeof(*encode)))
+		return true;
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
-	if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
-		memset(encode, 0, sizeof(*encode));
-		return false;
-	}
-
-	return true;
+	/* non-support means DVI */
+	memset(encode, 0, sizeof(*encode));
+	return false;
 }
 
-static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
 				  uint8_t mode)
 {
-	uint8_t status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
 }
 
-static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
 				       uint8_t mode)
 {
-	uint8_t status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-
-	return (status == SDVO_CMD_STATUS_SUCCESS);
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
 
 #if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 {
 	int i, j;
 	uint8_t set_buf_index[2];
@@ -887,8 +854,7 @@
 	uint8_t buf[48];
 	uint8_t *pos;
 
-	intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
-	intel_sdvo_read_response(encoder, &av_split, 1);
+	intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
 
 	for (i = 0; i <= av_split; i++) {
 		set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -908,7 +874,7 @@
 }
 #endif
 
-static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
 				    int index,
 				    uint8_t *data, int8_t size, uint8_t tx_rate)
 {
@@ -917,15 +883,18 @@
     set_buf_index[0] = index;
     set_buf_index[1] = 0;
 
-    intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
-			 set_buf_index, 2);
+    if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+			      set_buf_index, 2))
+	    return false;
 
     for (; size > 0; size -= 8) {
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
+	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
+		return false;
+
 	data += 8;
     }
 
-    intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+    return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
 }
 
 static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1000,7 +969,7 @@
 	} __attribute__ ((packed)) u;
 } __attribute__((packed));
 
-static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
 					 struct drm_display_mode * mode)
 {
 	struct dip_infoframe avi_if = {
@@ -1011,133 +980,105 @@
 
 	avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
 						    4 + avi_if.len);
-	intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
-				4 + avi_if.len,
-				SDVO_HBUF_TX_VSYNC);
+	return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
+				       4 + avi_if.len,
+				       SDVO_HBUF_TX_VSYNC);
 }
 
-static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
 {
-
 	struct intel_sdvo_tv_format format;
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	uint32_t format_map, i;
-	uint8_t status;
+	uint32_t format_map;
 
-	for (i = 0; i < TV_FORMAT_NUM; i++)
-		if (tv_format_names[i] == sdvo_priv->tv_format_name)
-			break;
-
-	format_map = 1 << i;
+	format_map = 1 << intel_sdvo->tv_format_index;
 	memset(&format, 0, sizeof(format));
-	memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
-			sizeof(format) : sizeof(format_map));
+	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
-			     sizeof(format));
+	BUILD_BUG_ON(sizeof(format) != 6);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TV_FORMAT,
+				    &format, sizeof(format));
+}
 
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		DRM_DEBUG_KMS("%s: Failed to set TV format\n",
-			  SDVO_NAME(sdvo_priv));
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+					struct drm_display_mode *mode)
+{
+	struct intel_sdvo_dtd output_dtd;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  intel_sdvo->attached_output))
+		return false;
+
+	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+		return false;
+
+	return true;
+}
+
+static bool
+intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct intel_sdvo_dtd input_dtd;
+
+	/* Reset the input timing to the screen. Assume always input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return false;
+
+	if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+						      mode->clock / 10,
+						      mode->hdisplay,
+						      mode->vdisplay))
+		return false;
+
+	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+						   &input_dtd))
+		return false;
+
+	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+	intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+	mode->clock = adjusted_mode->clock;
+	return true;
 }
 
 static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 
-	if (dev_priv->is_tv) {
-		struct intel_sdvo_dtd output_dtd;
-		bool success;
-
-		/* We need to construct preferred input timings based on our
-		 * output timings.  To do that, we have to set the output
-		 * timings, even though this isn't really the right place in
-		 * the sequence to do it. Oh well.
-		 */
-
-
-		/* Set output timings */
-		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
-		intel_sdvo_set_target_output(intel_encoder,
-					     dev_priv->attached_output);
-		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
-
-		/* Set the input timing to the screen. Assume always input 0. */
-		intel_sdvo_set_target_input(intel_encoder, true, false);
-
-
-		success = intel_sdvo_create_preferred_input_timing(intel_encoder,
-								   mode->clock / 10,
-								   mode->hdisplay,
-								   mode->vdisplay);
-		if (success) {
-			struct intel_sdvo_dtd input_dtd;
-
-			intel_sdvo_get_preferred_input_timing(intel_encoder,
-							     &input_dtd);
-			intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
-			dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
-
-			drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-			mode->clock = adjusted_mode->clock;
-
-			adjusted_mode->clock *=
-				intel_sdvo_get_pixel_multiplier(mode);
-		} else {
+	/* We need to construct preferred input timings based on our
+	 * output timings.  To do that, we have to set the output
+	 * timings, even though this isn't really the right place in
+	 * the sequence to do it. Oh well.
+	 */
+	if (intel_sdvo->is_tv) {
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
 			return false;
-		}
-	} else if (dev_priv->is_lvds) {
-		struct intel_sdvo_dtd output_dtd;
-		bool success;
 
-		drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
-		/* Set output timings */
-		intel_sdvo_get_dtd_from_mode(&output_dtd,
-				dev_priv->sdvo_lvds_fixed_mode);
-
-		intel_sdvo_set_target_output(intel_encoder,
-					     dev_priv->attached_output);
-		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
-
-		/* Set the input timing to the screen. Assume always input 0. */
-		intel_sdvo_set_target_input(intel_encoder, true, false);
-
-
-		success = intel_sdvo_create_preferred_input_timing(
-				intel_encoder,
-				mode->clock / 10,
-				mode->hdisplay,
-				mode->vdisplay);
-
-		if (success) {
-			struct intel_sdvo_dtd input_dtd;
-
-			intel_sdvo_get_preferred_input_timing(intel_encoder,
-							     &input_dtd);
-			intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
-			dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
-
-			drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-			mode->clock = adjusted_mode->clock;
-
-			adjusted_mode->clock *=
-				intel_sdvo_get_pixel_multiplier(mode);
-		} else {
+		if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
 			return false;
-		}
+	} else if (intel_sdvo->is_lvds) {
+		drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
 
-	} else {
-		/* Make the CRTC code factor in the SDVO pixel multiplier.  The
-		 * SDVO device will be told of the multiplier during mode_set.
-		 */
-		adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+							    intel_sdvo->sdvo_lvds_fixed_mode))
+			return false;
+
+		if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
+			return false;
 	}
+
+	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
+	 * SDVO device will be told of the multiplier during mode_set.
+	 */
+	adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+
 	return true;
 }
 
@@ -1149,13 +1090,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 	u32 sdvox = 0;
-	int sdvo_pixel_multiply;
+	int sdvo_pixel_multiply, rate;
 	struct intel_sdvo_in_out_map in_out;
 	struct intel_sdvo_dtd input_dtd;
-	u8 status;
 
 	if (!mode)
 		return;
@@ -1166,41 +1105,50 @@
 	 * channel on the motherboard.  In a two-input device, the first input
 	 * will be SDVOB and the second SDVOC.
 	 */
-	in_out.in0 = sdvo_priv->attached_output;
+	in_out.in0 = intel_sdvo->attached_output;
 	in_out.in1 = 0;
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
-			     &in_out, sizeof(in_out));
-	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_IN_OUT_MAP,
+				  &in_out, sizeof(in_out)))
+		return;
 
-	if (sdvo_priv->is_hdmi) {
-		intel_sdvo_set_avi_infoframe(intel_encoder, mode);
+	if (intel_sdvo->is_hdmi) {
+		if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+			return;
+
 		sdvox |= SDVO_AUDIO_ENABLE;
 	}
 
 	/* We have already tried to get the input timing in mode_fixup and
 	   filled it into adjusted_mode */
-	if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+	if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
 		intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-		input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
+		input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
 	} else
 		intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
 
 	/* If it's a TV, we already set the output timing in mode_fixup.
 	 * Otherwise, the output timing is equal to the input timing.
 	 */
-	if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
+	if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
 		/* Set the output timing to the screen */
-		intel_sdvo_set_target_output(intel_encoder,
-					     sdvo_priv->attached_output);
-		intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
+		if (!intel_sdvo_set_target_output(intel_sdvo,
+						  intel_sdvo->attached_output))
+			return;
+
+		if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd))
+			return;
 	}
 
 	/* Set the input timing to the screen. Assume always input 0. */
-	intel_sdvo_set_target_input(intel_encoder, true, false);
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return;
 
-	if (sdvo_priv->is_tv)
-		intel_sdvo_set_tv_format(intel_encoder);
+	if (intel_sdvo->is_tv) {
+		if (!intel_sdvo_set_tv_format(intel_sdvo))
+			return;
+	}
 
 	/* We would like to use intel_sdvo_create_preferred_input_timing() to
 	 * provide the device with a timing it can support, if it supports that
@@ -1217,23 +1165,18 @@
 		intel_sdvo_set_input_timing(encoder, &input_dtd);
 	}
 #else
-	intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
+	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+		return;
 #endif
 
-	switch (intel_sdvo_get_pixel_multiplier(mode)) {
-	case 1:
-		intel_sdvo_set_clock_rate_mult(intel_encoder,
-					       SDVO_CLOCK_RATE_MULT_1X);
-		break;
-	case 2:
-		intel_sdvo_set_clock_rate_mult(intel_encoder,
-					       SDVO_CLOCK_RATE_MULT_2X);
-		break;
-	case 4:
-		intel_sdvo_set_clock_rate_mult(intel_encoder,
-					       SDVO_CLOCK_RATE_MULT_4X);
-		break;
+	sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+	switch (sdvo_pixel_multiply) {
+	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
 	}
+	if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+		return;
 
 	/* Set the SDVO control regs. */
 	if (IS_I965G(dev)) {
@@ -1243,8 +1186,8 @@
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 	} else {
-		sdvox |= I915_READ(sdvo_priv->sdvo_reg);
-		switch (sdvo_priv->sdvo_reg) {
+		sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+		switch (intel_sdvo->sdvo_reg) {
 		case SDVOB:
 			sdvox &= SDVOB_PRESERVE_MASK;
 			break;
@@ -1257,7 +1200,6 @@
 	if (intel_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
 
-	sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
 	if (IS_I965G(dev)) {
 		/* done in crtc_mode_set as the dpll_md reg must be written early */
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1266,28 +1208,28 @@
 		sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
-	if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
+	if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
 		sdvox |= SDVO_STALL_SELECT;
-	intel_sdvo_write_sdvox(intel_encoder, sdvox);
+	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
 
 static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	u32 temp;
 
 	if (mode != DRM_MODE_DPMS_ON) {
-		intel_sdvo_set_active_outputs(intel_encoder, 0);
+		intel_sdvo_set_active_outputs(intel_sdvo, 0);
 		if (0)
-			intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 
 		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = I915_READ(sdvo_priv->sdvo_reg);
+			temp = I915_READ(intel_sdvo->sdvo_reg);
 			if ((temp & SDVO_ENABLE) != 0) {
-				intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
+				intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
 			}
 		}
 	} else {
@@ -1295,28 +1237,25 @@
 		int i;
 		u8 status;
 
-		temp = I915_READ(sdvo_priv->sdvo_reg);
+		temp = I915_READ(intel_sdvo->sdvo_reg);
 		if ((temp & SDVO_ENABLE) == 0)
-			intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
+			intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
 		for (i = 0; i < 2; i++)
-		  intel_wait_for_vblank(dev);
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-		status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
-						       &input2);
-
-
+		status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
 		/* Warn if the device reported failure to sync.
 		 * A lot of SDVO devices fail to notify of sync, but it's
 		 * a given that if the status is a success, we succeeded.
 		 */
 		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
 			DRM_DEBUG_KMS("First %s output reported failure to "
-					"sync\n", SDVO_NAME(sdvo_priv));
+					"sync\n", SDVO_NAME(intel_sdvo));
 		}
 
 		if (0)
-			intel_sdvo_set_encoder_power_state(intel_encoder, mode);
-		intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
+			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 	}
 	return;
 }
@@ -1325,42 +1264,31 @@
 				 struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	if (sdvo_priv->pixel_clock_min > mode->clock)
+	if (intel_sdvo->pixel_clock_min > mode->clock)
 		return MODE_CLOCK_LOW;
 
-	if (sdvo_priv->pixel_clock_max < mode->clock)
+	if (intel_sdvo->pixel_clock_max < mode->clock)
 		return MODE_CLOCK_HIGH;
 
-	if (sdvo_priv->is_lvds == true) {
-		if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
+	if (intel_sdvo->is_lvds) {
+		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
 			return MODE_PANEL;
 
-		if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
-			return MODE_PANEL;
-
-		if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
+		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
 	return MODE_OK;
 }
 
-static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
 {
-	u8 status;
-
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-
-	return true;
+	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
 }
 
 /* No use! */
@@ -1368,12 +1296,12 @@
 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 {
 	struct drm_connector *connector = NULL;
-	struct intel_encoder *iout = NULL;
-	struct intel_sdvo_priv *sdvo;
+	struct intel_sdvo *iout = NULL;
+	struct intel_sdvo *sdvo;
 
 	/* find the sdvo connector */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		iout = to_intel_encoder(connector);
+		iout = to_intel_sdvo(connector);
 
 		if (iout->type != INTEL_OUTPUT_SDVO)
 			continue;
@@ -1395,75 +1323,69 @@
 {
 	u8 response[2];
 	u8 status;
-	struct intel_encoder *intel_encoder;
+	struct intel_sdvo *intel_sdvo;
 	DRM_DEBUG_KMS("\n");
 
 	if (!connector)
 		return 0;
 
-	intel_encoder = to_intel_encoder(connector);
+	intel_sdvo = to_intel_sdvo(connector);
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &response, 2);
-
-	if (response[0] !=0)
-		return 1;
-
-	return 0;
+	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+				    &response, 2) && response[0];
 }
 
 void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 {
 	u8 response[2];
 	u8 status;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-	intel_sdvo_read_response(intel_encoder, &response, 2);
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	intel_sdvo_read_response(intel_sdvo, &response, 2);
 
 	if (on) {
-		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-		status = intel_sdvo_read_response(intel_encoder, &response, 2);
+		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+		status = intel_sdvo_read_response(intel_sdvo, &response, 2);
 
-		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
 	} else {
 		response[0] = 0;
 		response[1] = 0;
-		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
 	}
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-	intel_sdvo_read_response(intel_encoder, &response, 2);
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	intel_sdvo_read_response(intel_sdvo, &response, 2);
 }
 #endif
 
 static bool
-intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int caps = 0;
 
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
 		caps++;
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
 		caps++;
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
 		caps++;
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
 		caps++;
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
 		caps++;
 
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
 		caps++;
 
-	if (sdvo_priv->caps.output_flags &
+	if (intel_sdvo->caps.output_flags &
 		(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
 		caps++;
 
@@ -1475,11 +1397,11 @@
 {
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	struct intel_encoder *intel_encoder;
+	struct intel_sdvo *intel_sdvo;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		intel_encoder = enc_to_intel_encoder(encoder);
-		if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+		intel_sdvo = enc_to_intel_sdvo(encoder);
+		if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 				if (encoder == intel_attached_encoder(connector))
 					return connector;
@@ -1493,8 +1415,8 @@
 intel_analog_is_connected(struct drm_device *dev)
 {
 	struct drm_connector *analog_connector;
-	analog_connector = intel_find_analog_connector(dev);
 
+	analog_connector = intel_find_analog_connector(dev);
 	if (!analog_connector)
 		return false;
 
@@ -1509,54 +1431,52 @@
 intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status status = connector_status_connected;
 	struct edid *edid = NULL;
 
-	edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+	edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
 
 	/* This is only applied to SDVO cards with multiple outputs */
-	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
+	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
 		uint8_t saved_ddc, temp_ddc;
-		saved_ddc = sdvo_priv->ddc_bus;
-		temp_ddc = sdvo_priv->ddc_bus >> 1;
+		saved_ddc = intel_sdvo->ddc_bus;
+		temp_ddc = intel_sdvo->ddc_bus >> 1;
 		/*
 		 * Don't use bus 1 as the argument of the DDC bus switch to
 		 * get the EDID; it is used for the SDVO SPD ROM.
 		 */
 		while(temp_ddc > 1) {
-			sdvo_priv->ddc_bus = temp_ddc;
-			edid = drm_get_edid(connector, intel_encoder->ddc_bus);
+			intel_sdvo->ddc_bus = temp_ddc;
+			edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
 			if (edid) {
 				/*
 				 * If we can get the EDID, this is probably
 				 * the correct DDC bus, so update it.
 				 */
-				sdvo_priv->ddc_bus = temp_ddc;
+				intel_sdvo->ddc_bus = temp_ddc;
 				break;
 			}
 			temp_ddc >>= 1;
 		}
 		if (edid == NULL)
-			sdvo_priv->ddc_bus = saved_ddc;
+			intel_sdvo->ddc_bus = saved_ddc;
 	}
 	/* When there is no EDID and no monitor is connected to the VGA port,
 	 * try to use the CRT DDC to read the EDID for the DVI connector.
 	 */
-	if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+	if (edid == NULL && intel_sdvo->analog_ddc_bus &&
 	    !intel_analog_is_connected(connector->dev))
-		edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+		edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
 
 	if (edid != NULL) {
 		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-		bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
+		bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
 
 		/* DDC bus is shared, match EDID to connector type */
 		if (is_digital && need_digital)
-			sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
 		else if (is_digital != need_digital)
 			status = connector_status_disconnected;
 
@@ -1572,33 +1492,29 @@
 static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
 {
 	uint16_t response;
-	u8 status;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
-	intel_sdvo_write_cmd(intel_encoder,
-			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
-	if (sdvo_priv->is_tv) {
+	if (!intel_sdvo_write_cmd(intel_sdvo,
+			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+		return connector_status_unknown;
+	if (intel_sdvo->is_tv) {
 		/* add 30ms delay when the output type is SDVO-TV */
 		mdelay(30);
 	}
-	status = intel_sdvo_read_response(intel_encoder, &response, 2);
+	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+		return connector_status_unknown;
 
 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
 
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return connector_status_unknown;
-
 	if (response == 0)
 		return connector_status_disconnected;
 
-	sdvo_priv->attached_output = response;
+	intel_sdvo->attached_output = response;
 
-	if ((sdvo_connector->output_flag & response) == 0)
+	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (response & SDVO_TMDS_MASK)
 		ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1607,16 +1523,16 @@
 
 	/* May update encoder flags, e.g. the clock requirements for SDVO TV, etc. */
 	if (ret == connector_status_connected) {
-		sdvo_priv->is_tv = false;
-		sdvo_priv->is_lvds = false;
-		intel_encoder->needs_tv_clock = false;
+		intel_sdvo->is_tv = false;
+		intel_sdvo->is_lvds = false;
+		intel_sdvo->base.needs_tv_clock = false;
 
 		if (response & SDVO_TV_MASK) {
-			sdvo_priv->is_tv = true;
-			intel_encoder->needs_tv_clock = true;
+			intel_sdvo->is_tv = true;
+			intel_sdvo->base.needs_tv_clock = true;
 		}
 		if (response & SDVO_LVDS_MASK)
-			sdvo_priv->is_lvds = true;
+			intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
 	}
 
 	return ret;
@@ -1625,12 +1541,11 @@
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 	int num_modes;
 
 	/* set the bus switch and get the modes */
-	num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
 
 	/*
 	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
@@ -1639,11 +1554,11 @@
 	 * which case we'll look there for the digital DDC data.
 	 */
 	if (num_modes == 0 &&
-	    sdvo_priv->analog_ddc_bus &&
+	    intel_sdvo->analog_ddc_bus &&
 	    !intel_analog_is_connected(connector->dev)) {
 		/* Switch to the analog ddc bus and try that
 		 */
-		(void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
+		(void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
 	}
 }
 
@@ -1715,52 +1630,43 @@
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	uint32_t reply = 0, format_map = 0;
 	int i;
-	uint8_t status;
-
 
 	/* Read the list of supported input resolutions for the selected TV
 	 * format.
 	 */
-	for (i = 0; i < TV_FORMAT_NUM; i++)
-		if (tv_format_names[i] ==  sdvo_priv->tv_format_name)
-			break;
-
-	format_map = (1 << i);
+	format_map = 1 << intel_sdvo->tv_format_index;
 	memcpy(&tv_res, &format_map,
-	       sizeof(struct intel_sdvo_sdtv_resolution_request) >
-	       sizeof(format_map) ? sizeof(format_map) :
-	       sizeof(struct intel_sdvo_sdtv_resolution_request));
+	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
 
-	intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
+	if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+		return;
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
-			     &tv_res, sizeof(tv_res));
-	status = intel_sdvo_read_response(intel_encoder, &reply, 3);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
+	BUILD_BUG_ON(sizeof(tv_res) != 3);
+	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+				  &tv_res, sizeof(tv_res)))
+		return;
+	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
 		if (reply & (1 << i)) {
 			struct drm_display_mode *nmode;
 			nmode = drm_mode_duplicate(connector->dev,
-					&sdvo_tv_modes[i]);
+						   &sdvo_tv_modes[i]);
 			if (nmode)
 				drm_mode_probed_add(connector, nmode);
 		}
-
 }
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct drm_display_mode *newmode;
 
 	/*
@@ -1768,7 +1674,7 @@
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1787,8 +1693,9 @@
 end:
 	list_for_each_entry(newmode, &connector->probed_modes, head) {
 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
-			sdvo_priv->sdvo_lvds_fixed_mode =
+			intel_sdvo->sdvo_lvds_fixed_mode =
 				drm_mode_duplicate(connector->dev, newmode);
+			intel_sdvo->is_lvds = true;
 			break;
 		}
 	}
@@ -1797,66 +1704,67 @@
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
-	if (IS_TV(sdvo_connector))
+	if (IS_TV(intel_sdvo_connector))
 		intel_sdvo_get_tv_modes(connector);
-	else if (IS_LVDS(sdvo_connector))
+	else if (IS_LVDS(intel_sdvo_connector))
 		intel_sdvo_get_lvds_modes(connector);
 	else
 		intel_sdvo_get_ddc_modes(connector);
 
-	if (list_empty(&connector->probed_modes))
-		return 0;
-	return 1;
+	return !list_empty(&connector->probed_modes);
 }
 
-static
-void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	struct drm_device *dev = connector->dev;
 
-	if (IS_TV(sdvo_priv)) {
-		if (sdvo_priv->left_property)
-			drm_property_destroy(dev, sdvo_priv->left_property);
-		if (sdvo_priv->right_property)
-			drm_property_destroy(dev, sdvo_priv->right_property);
-		if (sdvo_priv->top_property)
-			drm_property_destroy(dev, sdvo_priv->top_property);
-		if (sdvo_priv->bottom_property)
-			drm_property_destroy(dev, sdvo_priv->bottom_property);
-		if (sdvo_priv->hpos_property)
-			drm_property_destroy(dev, sdvo_priv->hpos_property);
-		if (sdvo_priv->vpos_property)
-			drm_property_destroy(dev, sdvo_priv->vpos_property);
-		if (sdvo_priv->saturation_property)
-			drm_property_destroy(dev,
-					sdvo_priv->saturation_property);
-		if (sdvo_priv->contrast_property)
-			drm_property_destroy(dev,
-					sdvo_priv->contrast_property);
-		if (sdvo_priv->hue_property)
-			drm_property_destroy(dev, sdvo_priv->hue_property);
-	}
-	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
-		if (sdvo_priv->brightness_property)
-			drm_property_destroy(dev,
-					sdvo_priv->brightness_property);
-	}
-	return;
+	if (intel_sdvo_connector->left)
+		drm_property_destroy(dev, intel_sdvo_connector->left);
+	if (intel_sdvo_connector->right)
+		drm_property_destroy(dev, intel_sdvo_connector->right);
+	if (intel_sdvo_connector->top)
+		drm_property_destroy(dev, intel_sdvo_connector->top);
+	if (intel_sdvo_connector->bottom)
+		drm_property_destroy(dev, intel_sdvo_connector->bottom);
+	if (intel_sdvo_connector->hpos)
+		drm_property_destroy(dev, intel_sdvo_connector->hpos);
+	if (intel_sdvo_connector->vpos)
+		drm_property_destroy(dev, intel_sdvo_connector->vpos);
+	if (intel_sdvo_connector->saturation)
+		drm_property_destroy(dev, intel_sdvo_connector->saturation);
+	if (intel_sdvo_connector->contrast)
+		drm_property_destroy(dev, intel_sdvo_connector->contrast);
+	if (intel_sdvo_connector->hue)
+		drm_property_destroy(dev, intel_sdvo_connector->hue);
+	if (intel_sdvo_connector->sharpness)
+		drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+	if (intel_sdvo_connector->flicker_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+	if (intel_sdvo_connector->flicker_filter_2d)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+	if (intel_sdvo_connector->flicker_filter_adaptive)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+	if (intel_sdvo_connector->tv_luma_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+	if (intel_sdvo_connector->tv_chroma_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+	if (intel_sdvo_connector->dot_crawl)
+		drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+	if (intel_sdvo_connector->brightness)
+		drm_property_destroy(dev, intel_sdvo_connector->brightness);
 }
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
-	if (sdvo_connector->tv_format_property)
+	if (intel_sdvo_connector->tv_format)
 		drm_property_destroy(connector->dev,
-				     sdvo_connector->tv_format_property);
+				     intel_sdvo_connector->tv_format);
 
 	intel_sdvo_destroy_enhance_property(connector);
 	drm_sysfs_connector_remove(connector);
@@ -1870,132 +1778,118 @@
 			uint64_t val)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
-	struct drm_crtc *crtc = encoder->crtc;
-	int ret = 0;
-	bool changed = false;
-	uint8_t cmd, status;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	uint16_t temp_value;
+	uint8_t cmd;
+	int ret;
 
 	ret = drm_connector_property_set_value(connector, property, val);
-	if (ret < 0)
-		goto out;
+	if (ret)
+		return ret;
 
-	if (property == sdvo_connector->tv_format_property) {
-		if (val >= TV_FORMAT_NUM) {
-			ret = -EINVAL;
-			goto out;
-		}
-		if (sdvo_priv->tv_format_name ==
-		    sdvo_connector->tv_format_supported[val])
-			goto out;
-
-		sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
-		changed = true;
+#define CHECK_PROPERTY(name, NAME) \
+	if (intel_sdvo_connector->name == property) { \
+		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+		if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+		cmd = SDVO_CMD_SET_##NAME; \
+		intel_sdvo_connector->cur_##name = temp_value; \
+		goto set_value; \
 	}
 
-	if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
-		cmd = 0;
+	if (property == intel_sdvo_connector->tv_format) {
+		if (val >= TV_FORMAT_NUM)
+			return -EINVAL;
+
+		if (intel_sdvo->tv_format_index ==
+		    intel_sdvo_connector->tv_format_supported[val])
+			return 0;
+
+		intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+		goto done;
+	} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
 		temp_value = val;
-		if (sdvo_connector->left_property == property) {
+		if (intel_sdvo_connector->left == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_connector->right_property, val);
-			if (sdvo_connector->left_margin == temp_value)
-				goto out;
+							 intel_sdvo_connector->right, val);
+			if (intel_sdvo_connector->left_margin == temp_value)
+				return 0;
 
-			sdvo_connector->left_margin = temp_value;
-			sdvo_connector->right_margin = temp_value;
-			temp_value = sdvo_connector->max_hscan -
-					sdvo_connector->left_margin;
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_connector->right_property == property) {
+			goto set_value;
+		} else if (intel_sdvo_connector->right == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_connector->left_property, val);
-			if (sdvo_connector->right_margin == temp_value)
-				goto out;
+							 intel_sdvo_connector->left, val);
+			if (intel_sdvo_connector->right_margin == temp_value)
+				return 0;
 
-			sdvo_connector->left_margin = temp_value;
-			sdvo_connector->right_margin = temp_value;
-			temp_value = sdvo_connector->max_hscan -
-				sdvo_connector->left_margin;
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
-		} else if (sdvo_connector->top_property == property) {
+			goto set_value;
+		} else if (intel_sdvo_connector->top == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_connector->bottom_property, val);
-			if (sdvo_connector->top_margin == temp_value)
-				goto out;
+							 intel_sdvo_connector->bottom, val);
+			if (intel_sdvo_connector->top_margin == temp_value)
+				return 0;
 
-			sdvo_connector->top_margin = temp_value;
-			sdvo_connector->bottom_margin = temp_value;
-			temp_value = sdvo_connector->max_vscan -
-					sdvo_connector->top_margin;
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_connector->bottom_property == property) {
+			goto set_value;
+		} else if (intel_sdvo_connector->bottom == property) {
 			drm_connector_property_set_value(connector,
-				sdvo_connector->top_property, val);
-			if (sdvo_connector->bottom_margin == temp_value)
-				goto out;
-			sdvo_connector->top_margin = temp_value;
-			sdvo_connector->bottom_margin = temp_value;
-			temp_value = sdvo_connector->max_vscan -
-					sdvo_connector->top_margin;
+							 intel_sdvo_connector->top, val);
+			if (intel_sdvo_connector->bottom_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
-		} else if (sdvo_connector->hpos_property == property) {
-			if (sdvo_connector->cur_hpos == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_POSITION_H;
-			sdvo_connector->cur_hpos = temp_value;
-		} else if (sdvo_connector->vpos_property == property) {
-			if (sdvo_connector->cur_vpos == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_POSITION_V;
-			sdvo_connector->cur_vpos = temp_value;
-		} else if (sdvo_connector->saturation_property == property) {
-			if (sdvo_connector->cur_saturation == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_SATURATION;
-			sdvo_connector->cur_saturation = temp_value;
-		} else if (sdvo_connector->contrast_property == property) {
-			if (sdvo_connector->cur_contrast == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_CONTRAST;
-			sdvo_connector->cur_contrast = temp_value;
-		} else if (sdvo_connector->hue_property == property) {
-			if (sdvo_connector->cur_hue == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_HUE;
-			sdvo_connector->cur_hue = temp_value;
-		} else if (sdvo_connector->brightness_property == property) {
-			if (sdvo_connector->cur_brightness == temp_value)
-				goto out;
-
-			cmd = SDVO_CMD_SET_BRIGHTNESS;
-			sdvo_connector->cur_brightness = temp_value;
+			goto set_value;
 		}
-		if (cmd) {
-			intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
-			status = intel_sdvo_read_response(intel_encoder,
-								NULL, 0);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO command \n");
-				return -EINVAL;
-			}
-			changed = true;
-		}
+		CHECK_PROPERTY(hpos, HPOS)
+		CHECK_PROPERTY(vpos, VPOS)
+		CHECK_PROPERTY(saturation, SATURATION)
+		CHECK_PROPERTY(contrast, CONTRAST)
+		CHECK_PROPERTY(hue, HUE)
+		CHECK_PROPERTY(brightness, BRIGHTNESS)
+		CHECK_PROPERTY(sharpness, SHARPNESS)
+		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
 	}
-	if (changed && crtc)
+
+	return -EINVAL; /* unknown property */
+
+set_value:
+	if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+		return -EIO;
+
+
+done:
+	if (encoder->crtc) {
+		struct drm_crtc *crtc = encoder->crtc;
+
 		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-				crtc->y, crtc->fb);
-out:
-	return ret;
+					 crtc->y, crtc->fb);
+	}
+
+	return 0;
+#undef CHECK_PROPERTY
 }
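The CHECK_PROPERTY() macro defined above replaces the long hand-written if/else chain for the simple scalar controls. As a rough illustration, CHECK_PROPERTY(hue, HUE) expands to approximately the following (expansion written out by hand for illustration, not part of the patch):

	if (intel_sdvo_connector->hue == property) {
		if (intel_sdvo_connector->cur_hue == temp_value)
			return 0;	/* unchanged, nothing to send */
		if (intel_sdvo_connector->max_hue < temp_value)
			return -EINVAL;	/* beyond the range reported by the device */
		cmd = SDVO_CMD_SET_HUE;
		intel_sdvo_connector->cur_hue = temp_value;
		goto set_value;	/* send cmd with temp_value, then fall through to done: */
	}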
 
 static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -2022,22 +1916,16 @@
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
 
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	if (sdvo_priv->analog_ddc_bus)
-		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+	if (intel_sdvo->analog_ddc_bus)
+		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
 
-	if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
 		drm_mode_destroy(encoder->dev,
-				 sdvo_priv->sdvo_lvds_fixed_mode);
+				 intel_sdvo->sdvo_lvds_fixed_mode);
 
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	intel_encoder_destroy(encoder);
 }
 
 static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2054,7 +1942,7 @@
  */
 static void
 intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
-			  struct intel_sdvo_priv *sdvo, u32 reg)
+			  struct intel_sdvo *sdvo, u32 reg)
 {
 	struct sdvo_device_mapping *mapping;
 
@@ -2067,57 +1955,46 @@
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
+intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
-	uint8_t status;
-
-	if (device == 0)
-		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
-	else
-		intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
-
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
-	status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return false;
-	return true;
+	return intel_sdvo_set_target_output(intel_sdvo,
+					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+				     &intel_sdvo->is_hdmi, 1);
 }
 
-static struct intel_encoder *
-intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
+static struct intel_sdvo *
+intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
 {
 	struct drm_device *dev = chan->drm_dev;
 	struct drm_encoder *encoder;
-	struct intel_encoder *intel_encoder = NULL;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		intel_encoder = enc_to_intel_encoder(encoder);
-		if (intel_encoder->ddc_bus == &chan->adapter)
-			break;
+		struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+		if (intel_sdvo->base.ddc_bus == &chan->adapter)
+			return intel_sdvo;
 	}
-	return intel_encoder;
+
+	return NULL;
 }
 
 static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
 				  struct i2c_msg msgs[], int num)
 {
-	struct intel_encoder *intel_encoder;
-	struct intel_sdvo_priv *sdvo_priv;
+	struct intel_sdvo *intel_sdvo;
 	struct i2c_algo_bit_data *algo_data;
 	const struct i2c_algorithm *algo;
 
 	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
-	intel_encoder =
-		intel_sdvo_chan_to_intel_encoder(
-				(struct intel_i2c_chan *)(algo_data->data));
-	if (intel_encoder == NULL)
+	intel_sdvo =
+		intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
+					      (algo_data->data));
+	if (intel_sdvo == NULL)
 		return -EINVAL;
 
-	sdvo_priv = intel_encoder->dev_priv;
-	algo = intel_encoder->i2c_bus->algo;
+	algo = intel_sdvo->base.i2c_bus->algo;
 
-	intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
+	intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
 	return algo->master_xfer(i2c_adap, msgs, num);
 }
 
@@ -2162,27 +2039,9 @@
 		return 0x72;
 }
 
-static bool
-intel_sdvo_connector_alloc (struct intel_connector **ret)
-{
-	struct intel_connector *intel_connector;
-	struct intel_sdvo_connector *sdvo_connector;
-
-	*ret = kzalloc(sizeof(*intel_connector) +
-			sizeof(*sdvo_connector), GFP_KERNEL);
-	if (!*ret)
-		return false;
-
-	intel_connector = *ret;
-	sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
-	intel_connector->dev_priv = sdvo_connector;
-
-	return true;
-}
-
 static void
-intel_sdvo_connector_create (struct drm_encoder *encoder,
-			     struct drm_connector *connector)
+intel_sdvo_connector_init(struct drm_encoder *encoder,
+			  struct drm_connector *connector)
 {
 	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
 			   connector->connector_type);
@@ -2198,582 +2057,470 @@
 }
 
 static bool
-intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_encoder->enc;
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_encoder *encoder = &intel_sdvo->base.enc;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
-	struct intel_sdvo_connector *sdvo_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	if (!intel_sdvo_connector_alloc(&intel_connector))
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
 		return false;
 
-	sdvo_connector = intel_connector->dev_priv;
-
 	if (device == 0) {
-		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
-		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
 	} else if (device == 1) {
-		sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
-		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
 	}
 
+	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-	if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
-		&& intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
-		&& sdvo_priv->is_hdmi) {
+	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
+		&& intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
+		&& intel_sdvo->is_hdmi) {
 		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_encoder,
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 	}
-	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				    (1 << INTEL_ANALOG_CLONE_BIT);
+	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-	intel_sdvo_connector_create(encoder, connector);
+	intel_sdvo_connector_init(encoder, connector);
 
 	return true;
 }
 
 static bool
-intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 {
-        struct drm_encoder *encoder = &intel_encoder->enc;
-        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_encoder *encoder = &intel_sdvo->base.enc;
         struct drm_connector *connector;
         struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *sdvo_connector;
+        struct intel_sdvo_connector *intel_sdvo_connector;
 
-        if (!intel_sdvo_connector_alloc(&intel_connector))
-                return false;
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
 
+	intel_connector = &intel_sdvo_connector->base;
         connector = &intel_connector->base;
         encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
         connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-        sdvo_connector = intel_connector->dev_priv;
 
-        sdvo_priv->controlled_output |= type;
-        sdvo_connector->output_flag = type;
+        intel_sdvo->controlled_output |= type;
+        intel_sdvo_connector->output_flag = type;
 
-        sdvo_priv->is_tv = true;
-        intel_encoder->needs_tv_clock = true;
-        intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+        intel_sdvo->is_tv = true;
+        intel_sdvo->base.needs_tv_clock = true;
+        intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 
-        intel_sdvo_connector_create(encoder, connector);
+        intel_sdvo_connector_init(encoder, connector);
 
-        intel_sdvo_tv_create_property(connector, type);
+        if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+		goto err;
 
-        intel_sdvo_create_enhance_property(connector);
+        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
 
         return true;
+
+err:
+	intel_sdvo_destroy_enhance_property(connector);
+	kfree(intel_sdvo_connector);
+	return false;
 }
 
 static bool
-intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 {
-        struct drm_encoder *encoder = &intel_encoder->enc;
-        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_encoder *encoder = &intel_sdvo->base.enc;
         struct drm_connector *connector;
         struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *sdvo_connector;
+        struct intel_sdvo_connector *intel_sdvo_connector;
 
-        if (!intel_sdvo_connector_alloc(&intel_connector))
-                return false;
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
 
+	intel_connector = &intel_sdvo_connector->base;
         connector = &intel_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
         encoder->encoder_type = DRM_MODE_ENCODER_DAC;
         connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-        sdvo_connector = intel_connector->dev_priv;
 
         if (device == 0) {
-                sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
-                sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
         } else if (device == 1) {
-                sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
-                sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
         }
 
-        intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                                    (1 << INTEL_ANALOG_CLONE_BIT);
+        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-        intel_sdvo_connector_create(encoder, connector);
+        intel_sdvo_connector_init(encoder, connector);
         return true;
 }
 
 static bool
-intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 {
-        struct drm_encoder *encoder = &intel_encoder->enc;
-        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+        struct drm_encoder *encoder = &intel_sdvo->base.enc;
         struct drm_connector *connector;
         struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *sdvo_connector;
+        struct intel_sdvo_connector *intel_sdvo_connector;
 
-        if (!intel_sdvo_connector_alloc(&intel_connector))
-                return false;
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
 
-        connector = &intel_connector->base;
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
         encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
         connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-        sdvo_connector = intel_connector->dev_priv;
-
-        sdvo_priv->is_lvds = true;
 
         if (device == 0) {
-                sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
-                sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
         } else if (device == 1) {
-                sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
-                sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
         }
 
-        intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-                                    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+        intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
 
-        intel_sdvo_connector_create(encoder, connector);
-        intel_sdvo_create_enhance_property(connector);
-        return true;
+        intel_sdvo_connector_init(encoder, connector);
+        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	intel_sdvo_destroy_enhance_property(connector);
+	kfree(intel_sdvo_connector);
+	return false;
 }
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
-	sdvo_priv->is_tv = false;
-	intel_encoder->needs_tv_clock = false;
-	sdvo_priv->is_lvds = false;
+	intel_sdvo->is_tv = false;
+	intel_sdvo->base.needs_tv_clock = false;
+	intel_sdvo->is_lvds = false;
 
 	/* SDVO requires that an XXX1 function may not exist unless the XXX0 function does. */
 
 	if (flags & SDVO_OUTPUT_TMDS0)
-		if (!intel_sdvo_dvi_init(intel_encoder, 0))
+		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
 			return false;
 
 	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
-		if (!intel_sdvo_dvi_init(intel_encoder, 1))
+		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
 			return false;
 
 	/* TV has no XXX1 function block */
 	if (flags & SDVO_OUTPUT_SVID0)
-		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
 			return false;
 
 	if (flags & SDVO_OUTPUT_CVBS0)
-		if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
 			return false;
 
 	if (flags & SDVO_OUTPUT_RGB0)
-		if (!intel_sdvo_analog_init(intel_encoder, 0))
+		if (!intel_sdvo_analog_init(intel_sdvo, 0))
 			return false;
 
 	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
-		if (!intel_sdvo_analog_init(intel_encoder, 1))
+		if (!intel_sdvo_analog_init(intel_sdvo, 1))
 			return false;
 
 	if (flags & SDVO_OUTPUT_LVDS0)
-		if (!intel_sdvo_lvds_init(intel_encoder, 0))
+		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
 			return false;
 
 	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
-		if (!intel_sdvo_lvds_init(intel_encoder, 1))
+		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
 			return false;
 
 	if ((flags & SDVO_OUTPUT_MASK) == 0) {
 		unsigned char bytes[2];
 
-		sdvo_priv->controlled_output = 0;
-		memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+		intel_sdvo->controlled_output = 0;
+		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
 		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
-			      SDVO_NAME(sdvo_priv),
+			      SDVO_NAME(intel_sdvo),
 			      bytes[0], bytes[1]);
 		return false;
 	}
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
 
 	return true;
 }
 
-static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+					  struct intel_sdvo_connector *intel_sdvo_connector,
+					  int type)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+	struct drm_device *dev = intel_sdvo->base.enc.dev;
 	struct intel_sdvo_tv_format format;
 	uint32_t format_map, i;
-	uint8_t status;
 
-	intel_sdvo_set_target_output(intel_encoder, type);
+	if (!intel_sdvo_set_target_output(intel_sdvo, type))
+		return false;
 
-	intel_sdvo_write_cmd(intel_encoder,
-			     SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder,
-					  &format, sizeof(format));
-	if (status != SDVO_CMD_STATUS_SUCCESS)
-		return;
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+				  &format, sizeof(format)))
+		return false;
 
-	memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
-	       sizeof(format_map) : sizeof(format));
+	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
 
 	if (format_map == 0)
-		return;
+		return false;
 
-	sdvo_connector->format_supported_num = 0;
+	intel_sdvo_connector->format_supported_num = 0;
 	for (i = 0 ; i < TV_FORMAT_NUM; i++)
-		if (format_map & (1 << i)) {
-			sdvo_connector->tv_format_supported
-			[sdvo_connector->format_supported_num++] =
-			tv_format_names[i];
-		}
+		if (format_map & (1 << i))
+			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
 
 
-	sdvo_connector->tv_format_property =
-			drm_property_create(
-				connector->dev, DRM_MODE_PROP_ENUM,
-				"mode", sdvo_connector->format_supported_num);
+	intel_sdvo_connector->tv_format =
+			drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					    "mode", intel_sdvo_connector->format_supported_num);
+	if (!intel_sdvo_connector->tv_format)
+		return false;
 
-	for (i = 0; i < sdvo_connector->format_supported_num; i++)
+	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
 		drm_property_add_enum(
-				sdvo_connector->tv_format_property, i,
-				i, sdvo_connector->tv_format_supported[i]);
+				intel_sdvo_connector->tv_format, i,
+				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
-	sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
-	drm_connector_attach_property(
-			connector, sdvo_connector->tv_format_property, 0);
+	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+	drm_connector_attach_property(&intel_sdvo_connector->base.base,
+				      intel_sdvo_connector->tv_format, 0);
+	return true;
 
 }
 
-static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+#define ENHANCEMENT(name, NAME) do { \
+	if (enhancements.name) { \
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+			return false; \
+		intel_sdvo_connector->max_##name = data_value[0]; \
+		intel_sdvo_connector->cur_##name = response; \
+		intel_sdvo_connector->name = \
+			drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+		if (!intel_sdvo_connector->name) return false; \
+		intel_sdvo_connector->name->values[0] = 0; \
+		intel_sdvo_connector->name->values[1] = data_value[0]; \
+		drm_connector_attach_property(connector, \
+					      intel_sdvo_connector->name, \
+					      intel_sdvo_connector->cur_##name); \
+		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+			      data_value[0], data_value[1], response); \
+	} \
+} while(0)
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+				      struct intel_sdvo_connector *intel_sdvo_connector,
+				      struct intel_sdvo_enhancements_reply enhancements)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
-	struct intel_sdvo_enhancements_reply sdvo_data;
-	struct drm_device *dev = connector->dev;
-	uint8_t status;
+	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
 	uint16_t response, data_value[2];
 
-	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-						NULL, 0);
-	status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
-					sizeof(sdvo_data));
-	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG_KMS(" incorrect response is returned\n");
-		return;
+	/* When horizontal overscan is supported, add the left/right margin properties */
+	if (enhancements.overscan_h) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_H,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_H,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_hscan = data_value[0];
+		intel_sdvo_connector->left_margin = data_value[0] - response;
+		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+		intel_sdvo_connector->left =
+			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+					    "left_margin", 2);
+		if (!intel_sdvo_connector->left)
+			return false;
+
+		intel_sdvo_connector->left->values[0] = 0;
+		intel_sdvo_connector->left->values[1] = data_value[0];
+		drm_connector_attach_property(connector,
+					      intel_sdvo_connector->left,
+					      intel_sdvo_connector->left_margin);
+
+		intel_sdvo_connector->right =
+			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+					    "right_margin", 2);
+		if (!intel_sdvo_connector->right)
+			return false;
+
+		intel_sdvo_connector->right->values[0] = 0;
+		intel_sdvo_connector->right->values[1] = data_value[0];
+		drm_connector_attach_property(connector,
+					      intel_sdvo_connector->right,
+					      intel_sdvo_connector->right_margin);
+		DRM_DEBUG_KMS("h_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
 	}
-	response = *((uint16_t *)&sdvo_data);
-	if (!response) {
+
+	if (enhancements.overscan_v) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_V,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_V,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_vscan = data_value[0];
+		intel_sdvo_connector->top_margin = data_value[0] - response;
+		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+		intel_sdvo_connector->top =
+			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+					    "top_margin", 2);
+		if (!intel_sdvo_connector->top)
+			return false;
+
+		intel_sdvo_connector->top->values[0] = 0;
+		intel_sdvo_connector->top->values[1] = data_value[0];
+		drm_connector_attach_property(connector,
+					      intel_sdvo_connector->top,
+					      intel_sdvo_connector->top_margin);
+
+		intel_sdvo_connector->bottom =
+			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+					    "bottom_margin", 2);
+		if (!intel_sdvo_connector->bottom)
+			return false;
+
+		intel_sdvo_connector->bottom->values[0] = 0;
+		intel_sdvo_connector->bottom->values[1] = data_value[0];
+		drm_connector_attach_property(connector,
+					      intel_sdvo_connector->bottom,
+					      intel_sdvo_connector->bottom_margin);
+		DRM_DEBUG_KMS("v_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	ENHANCEMENT(hpos, HPOS);
+	ENHANCEMENT(vpos, VPOS);
+	ENHANCEMENT(saturation, SATURATION);
+	ENHANCEMENT(contrast, CONTRAST);
+	ENHANCEMENT(hue, HUE);
+	ENHANCEMENT(sharpness, SHARPNESS);
+	ENHANCEMENT(brightness, BRIGHTNESS);
+	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+	if (enhancements.dot_crawl) {
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_dot_crawl = 1;
+		intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+		intel_sdvo_connector->dot_crawl =
+			drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+		if (!intel_sdvo_connector->dot_crawl)
+			return false;
+
+		intel_sdvo_connector->dot_crawl->values[0] = 0;
+		intel_sdvo_connector->dot_crawl->values[1] = 1;
+		drm_connector_attach_property(connector,
+					      intel_sdvo_connector->dot_crawl,
+					      intel_sdvo_connector->cur_dot_crawl);
+		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+	}
+
+	return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+					struct intel_sdvo_connector *intel_sdvo_connector,
+					struct intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	ENHANCEMENT(brightness, BRIGHTNESS);
+
+	return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+					       struct intel_sdvo_connector *intel_sdvo_connector)
+{
+	union {
+		struct intel_sdvo_enhancements_reply reply;
+		uint16_t response;
+	} enhancements;
+
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+				  &enhancements, sizeof(enhancements)))
+		return false;
+
+	if (enhancements.response == 0) {
 		DRM_DEBUG_KMS("No enhancement is supported\n");
-		return;
+		return true;
 	}
-	if (IS_TV(sdvo_priv)) {
-		/* when horizontal overscan is supported, Add the left/right
-		 * property
-		 */
-		if (sdvo_data.overscan_h) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO max "
-						"h_overscan\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
-				return;
-			}
-			sdvo_priv->max_hscan = data_value[0];
-			sdvo_priv->left_margin = data_value[0] - response;
-			sdvo_priv->right_margin = sdvo_priv->left_margin;
-			sdvo_priv->left_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"left_margin", 2);
-			sdvo_priv->left_property->values[0] = 0;
-			sdvo_priv->left_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->left_property,
-						sdvo_priv->left_margin);
-			sdvo_priv->right_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"right_margin", 2);
-			sdvo_priv->right_property->values[0] = 0;
-			sdvo_priv->right_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->right_property,
-						sdvo_priv->right_margin);
-			DRM_DEBUG_KMS("h_overscan: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.overscan_v) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO max "
-						"v_overscan\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
-				return;
-			}
-			sdvo_priv->max_vscan = data_value[0];
-			sdvo_priv->top_margin = data_value[0] - response;
-			sdvo_priv->bottom_margin = sdvo_priv->top_margin;
-			sdvo_priv->top_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"top_margin", 2);
-			sdvo_priv->top_property->values[0] = 0;
-			sdvo_priv->top_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->top_property,
-						sdvo_priv->top_margin);
-			sdvo_priv->bottom_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"bottom_margin", 2);
-			sdvo_priv->bottom_property->values[0] = 0;
-			sdvo_priv->bottom_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->bottom_property,
-						sdvo_priv->bottom_margin);
-			DRM_DEBUG_KMS("v_overscan: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.position_h) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_POSITION_H, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
-				return;
-			}
-			sdvo_priv->max_hpos = data_value[0];
-			sdvo_priv->cur_hpos = response;
-			sdvo_priv->hpos_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"hpos", 2);
-			sdvo_priv->hpos_property->values[0] = 0;
-			sdvo_priv->hpos_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->hpos_property,
-						sdvo_priv->cur_hpos);
-			DRM_DEBUG_KMS("h_position: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.position_v) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_POSITION_V, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
-				return;
-			}
-			sdvo_priv->max_vpos = data_value[0];
-			sdvo_priv->cur_vpos = response;
-			sdvo_priv->vpos_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"vpos", 2);
-			sdvo_priv->vpos_property->values[0] = 0;
-			sdvo_priv->vpos_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->vpos_property,
-						sdvo_priv->cur_vpos);
-			DRM_DEBUG_KMS("v_position: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.saturation) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_SATURATION, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
-				return;
-			}
-			sdvo_priv->max_saturation = data_value[0];
-			sdvo_priv->cur_saturation = response;
-			sdvo_priv->saturation_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"saturation", 2);
-			sdvo_priv->saturation_property->values[0] = 0;
-			sdvo_priv->saturation_property->values[1] =
-							data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->saturation_property,
-						sdvo_priv->cur_saturation);
-			DRM_DEBUG_KMS("saturation: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.contrast) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_CONTRAST, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
-				return;
-			}
-			sdvo_priv->max_contrast = data_value[0];
-			sdvo_priv->cur_contrast = response;
-			sdvo_priv->contrast_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"contrast", 2);
-			sdvo_priv->contrast_property->values[0] = 0;
-			sdvo_priv->contrast_property->values[1] = data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->contrast_property,
-						sdvo_priv->cur_contrast);
-			DRM_DEBUG_KMS("contrast: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-		if (sdvo_data.hue) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_HUE, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_HUE, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
-				return;
-			}
-			sdvo_priv->max_hue = data_value[0];
-			sdvo_priv->cur_hue = response;
-			sdvo_priv->hue_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"hue", 2);
-			sdvo_priv->hue_property->values[0] = 0;
-			sdvo_priv->hue_property->values[1] =
-							data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->hue_property,
-						sdvo_priv->cur_hue);
-			DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-	}
-	if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
-		if (sdvo_data.brightness) {
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&data_value, 4);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
-				return;
-			}
-			intel_sdvo_write_cmd(intel_encoder,
-				SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
-			status = intel_sdvo_read_response(intel_encoder,
-				&response, 2);
-			if (status != SDVO_CMD_STATUS_SUCCESS) {
-				DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
-				return;
-			}
-			sdvo_priv->max_brightness = data_value[0];
-			sdvo_priv->cur_brightness = response;
-			sdvo_priv->brightness_property =
-				drm_property_create(dev, DRM_MODE_PROP_RANGE,
-						"brightness", 2);
-			sdvo_priv->brightness_property->values[0] = 0;
-			sdvo_priv->brightness_property->values[1] =
-							data_value[0];
-			drm_connector_attach_property(connector,
-						sdvo_priv->brightness_property,
-						sdvo_priv->cur_brightness);
-			DRM_DEBUG_KMS("brightness: max %d, "
-					"default %d, current %d\n",
-					data_value[0], data_value[1], response);
-		}
-	}
-	return;
+
+	if (IS_TV(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else if(IS_LVDS(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else
+		return true;
+
 }
 
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
-	struct intel_sdvo_priv *sdvo_priv;
+	struct intel_sdvo *intel_sdvo;
 	u8 ch[0x40];
 	int i;
 	u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
-	intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
-	if (!intel_encoder) {
+	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+	if (!intel_sdvo)
 		return false;
-	}
 
-	sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
-	sdvo_priv->sdvo_reg = sdvo_reg;
+	intel_sdvo->sdvo_reg = sdvo_reg;
 
-	intel_encoder->dev_priv = sdvo_priv;
+	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -2795,14 +2542,14 @@
 	if (!intel_encoder->i2c_bus)
 		goto err_inteloutput;
 
-	sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
 
 	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
 	intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
-		if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
+		if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
 				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 			goto err_i2c;
@@ -2812,17 +2559,16 @@
 	/* setup the DDC bus. */
 	if (IS_SDVOB(sdvo_reg)) {
 		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 						"SDVOB/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 	} else {
 		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
-		sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
 						"SDVOC/VGA DDC BUS");
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
 	}
-
-	if (intel_encoder->ddc_bus == NULL)
+	if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
 		goto err_i2c;
 
 	/* Wrap with our custom algo which switches to DDC mode */
@@ -2833,53 +2579,56 @@
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
 
 	/* In the default case, SDVO LVDS is false */
-	intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
+	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+		goto err_enc;
 
-	if (intel_sdvo_output_setup(intel_encoder,
-				    sdvo_priv->caps.output_flags) != true) {
+	if (intel_sdvo_output_setup(intel_sdvo,
+				    intel_sdvo->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-		goto err_i2c;
+		goto err_enc;
 	}
 
-	intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
+	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume input 0 is always used. */
-	intel_sdvo_set_target_input(intel_encoder, true, false);
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		goto err_enc;
 
-	intel_sdvo_get_input_pixel_clock_range(intel_encoder,
-					       &sdvo_priv->pixel_clock_min,
-					       &sdvo_priv->pixel_clock_max);
-
+	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+						    &intel_sdvo->pixel_clock_min,
+						    &intel_sdvo->pixel_clock_max))
+		goto err_enc;
 
 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
 			"clock range %dMHz - %dMHz, "
 			"input 1: %c, input 2: %c, "
 			"output 1: %c, output 2: %c\n",
-			SDVO_NAME(sdvo_priv),
-			sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
-			sdvo_priv->caps.device_rev_id,
-			sdvo_priv->pixel_clock_min / 1000,
-			sdvo_priv->pixel_clock_max / 1000,
-			(sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
-			(sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+			SDVO_NAME(intel_sdvo),
+			intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+			intel_sdvo->caps.device_rev_id,
+			intel_sdvo->pixel_clock_min / 1000,
+			intel_sdvo->pixel_clock_max / 1000,
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
 			/* check currently supported outputs */
-			sdvo_priv->caps.output_flags &
+			intel_sdvo->caps.output_flags &
 			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
-			sdvo_priv->caps.output_flags &
+			intel_sdvo->caps.output_flags &
 			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-
 	return true;
 
+err_enc:
+	drm_encoder_cleanup(&intel_encoder->enc);
 err_i2c:
-	if (sdvo_priv->analog_ddc_bus != NULL)
-		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+	if (intel_sdvo->analog_ddc_bus != NULL)
+		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
 	if (intel_encoder->ddc_bus != NULL)
 		intel_i2c_destroy(intel_encoder->ddc_bus);
 	if (intel_encoder->i2c_bus != NULL)
 		intel_i2c_destroy(intel_encoder->i2c_bus);
 err_inteloutput:
-	kfree(intel_encoder);
+	kfree(intel_sdvo);
 
 	return false;
 }
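
The intel_sdvo.c conversion above replaces the dev_priv blob tacked onto struct intel_encoder with a containing struct that embeds the encoder and is recovered through container_of(), so the error paths can simply kfree() the container. A minimal sketch of that embedding pattern, using simplified, hypothetical field names rather than the driver's real definitions:

/* Base-embedding sketch; names are illustrative only. */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_encoder {
	int type;
};

struct intel_sdvo {
	struct intel_encoder base;	/* embedded base object */
	int sdvo_reg;			/* device-specific state lives alongside it */
};

static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
	/* Walk back from the embedded member to its container. */
	return container_of(encoder, struct intel_sdvo, base);
}

Because the base object and the SDVO state come from a single kzalloc(), freeing the container releases both, which is why the failure paths above free intel_sdvo rather than the encoder.
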
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index ba5cdf8..a386b02 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@
 # define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
 
 #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
-/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
 struct intel_sdvo_tv_format {
     unsigned int ntsc_m:1;
     unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@
     unsigned int overscan_h:1;
 
     unsigned int overscan_v:1;
-    unsigned int position_h:1;
-    unsigned int position_v:1;
+    unsigned int hpos:1;
+    unsigned int vpos:1;
     unsigned int sharpness:1;
     unsigned int dot_crawl:1;
     unsigned int dither:1;
-    unsigned int max_tv_chroma_filter:1;
-    unsigned int max_tv_luma_filter:1;
+    unsigned int tv_chroma_filter:1;
+    unsigned int tv_luma_filter:1;
 } __attribute__((packed));
 
 /* Picture enhancement limits below are dependent on the current TV format,
  * and thus need to be queried and set after it.
  */
-#define SDVO_CMD_GET_MAX_FLICKER_FITER			0x4d
-#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER		0x7b
-#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER		0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
 #define SDVO_CMD_GET_MAX_SATURATION			0x55
 #define SDVO_CMD_GET_MAX_HUE				0x58
 #define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
 #define SDVO_CMD_GET_MAX_CONTRAST			0x5e
 #define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
 #define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
-#define SDVO_CMD_GET_MAX_POSITION_H			0x67
-#define SDVO_CMD_GET_MAX_POSITION_V			0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS_V			0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA			0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA			0x77
+#define SDVO_CMD_GET_MAX_HPOS				0x67
+#define SDVO_CMD_GET_MAX_VPOS				0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
 struct intel_sdvo_enhancement_limits_reply {
     u16 max_value;
     u16 default_value;
@@ -638,10 +638,10 @@
 
 #define SDVO_CMD_GET_FLICKER_FILTER			0x4e
 #define SDVO_CMD_SET_FLICKER_FILTER			0x4f
-#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER		0x50
-#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER		0x51
-#define SDVO_CMD_GET_2D_FLICKER_FITER			0x53
-#define SDVO_CMD_SET_2D_FLICKER_FITER			0x54
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
 #define SDVO_CMD_GET_SATURATION				0x56
 #define SDVO_CMD_SET_SATURATION				0x57
 #define SDVO_CMD_GET_HUE				0x59
@@ -654,16 +654,16 @@
 #define SDVO_CMD_SET_OVERSCAN_H				0x63
 #define SDVO_CMD_GET_OVERSCAN_V				0x65
 #define SDVO_CMD_SET_OVERSCAN_V				0x66
-#define SDVO_CMD_GET_POSITION_H				0x68
-#define SDVO_CMD_SET_POSITION_H				0x69
-#define SDVO_CMD_GET_POSITION_V				0x6b
-#define SDVO_CMD_SET_POSITION_V				0x6c
+#define SDVO_CMD_GET_HPOS				0x68
+#define SDVO_CMD_SET_HPOS				0x69
+#define SDVO_CMD_GET_VPOS				0x6b
+#define SDVO_CMD_SET_VPOS				0x6c
 #define SDVO_CMD_GET_SHARPNESS				0x6e
 #define SDVO_CMD_SET_SHARPNESS				0x6f
-#define SDVO_CMD_GET_TV_CHROMA				0x75
-#define SDVO_CMD_SET_TV_CHROMA				0x76
-#define SDVO_CMD_GET_TV_LUMA				0x78
-#define SDVO_CMD_SET_TV_LUMA				0x79
+#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
 struct intel_sdvo_enhancements_arg {
     u16 value;
 }__attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index cc3726a..d2029ef 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -44,7 +44,9 @@
 };
 
 /** Private structure for the integrated TV support */
-struct intel_tv_priv {
+struct intel_tv {
+	struct intel_encoder base;
+
 	int type;
 	char *tv_format;
 	int margin[4];
@@ -896,6 +898,11 @@
 	},
 };
 
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+}
+
 static void
 intel_tv_dpms(struct drm_encoder *encoder, int mode)
 {
@@ -929,19 +936,17 @@
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_encoder *intel_encoder)
+intel_tv_mode_find (struct intel_tv *intel_tv)
 {
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-
-	return intel_tv_mode_lookup(tv_priv->tv_format);
+	return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -957,8 +962,8 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_mode_config *drm_config = &dev->mode_config;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	struct drm_encoder *other_encoder;
 
 	if (!tv_mode)
@@ -983,9 +988,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	u32 tv_ctl;
 	u32 hctl1, hctl2, hctl3;
 	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1001,7 +1005,7 @@
 	tv_ctl = I915_READ(TV_CTL);
 	tv_ctl &= TV_CTL_SAVE;
 
-	switch (tv_priv->type) {
+	switch (intel_tv->type) {
 	default:
 	case DRM_MODE_CONNECTOR_Unknown:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1154,11 +1158,11 @@
 
 		/* Wait for vblank for the disable to take effect */
 		if (!IS_I9XX(dev))
-			intel_wait_for_vblank(dev);
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 		I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
 		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 		/* Filter ctl must be set before TV_WIN_SIZE */
 		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1168,12 +1172,12 @@
 		else
 			ysize = 2*tv_mode->nbr_end + 1;
 
-		xpos += tv_priv->margin[TV_MARGIN_LEFT];
-		ypos += tv_priv->margin[TV_MARGIN_TOP];
-		xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
-			  tv_priv->margin[TV_MARGIN_RIGHT]);
-		ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
-			  tv_priv->margin[TV_MARGIN_BOTTOM]);
+		xpos += intel_tv->margin[TV_MARGIN_LEFT];
+		ypos += intel_tv->margin[TV_MARGIN_TOP];
+		xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+			  intel_tv->margin[TV_MARGIN_RIGHT]);
+		ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+			  intel_tv->margin[TV_MARGIN_BOTTOM]);
 		I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
 		I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
 
@@ -1222,11 +1226,12 @@
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = &intel_tv->base.enc;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
@@ -1263,11 +1268,11 @@
 		   DAC_C_0_7_V);
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	tv_dac = I915_READ(TV_DAC);
 	I915_WRITE(TV_DAC, save_tv_dac);
 	I915_WRITE(TV_CTL, save_tv_ctl);
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	/*
 	 *  A B C
 	 *  0 1 1 Composite
@@ -1304,12 +1309,11 @@
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int i;
 
-	if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+	if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
 		tv_mode->component_only)
 		return;
 
@@ -1317,12 +1321,12 @@
 	for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
 		tv_mode = tv_modes + i;
 
-		if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
 			tv_mode->component_only)
 			break;
 	}
 
-	tv_priv->tv_format = tv_mode->name;
+	intel_tv->tv_format = tv_mode->name;
 	drm_connector_property_set_value(connector,
 		connector->dev->mode_config.tv_mode_property, i);
 }
@@ -1336,31 +1340,31 @@
 static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector)
 {
-	struct drm_crtc *crtc;
 	struct drm_display_mode mode;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	int dpms_mode;
-	int type = tv_priv->type;
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	int type;
 
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (encoder->crtc && encoder->crtc->enabled) {
-		type = intel_tv_detect_type(encoder->crtc, intel_encoder);
+		type = intel_tv_detect_type(intel_tv);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+		struct drm_crtc *crtc;
+		int dpms_mode;
+
+		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder, connector,
+			type = intel_tv_detect_type(intel_tv);
+			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
 			type = -1;
 	}
 
-	tv_priv->type = type;
+	intel_tv->type = type;
 
 	if (type < 0)
 		return connector_status_disconnected;
@@ -1391,8 +1395,8 @@
 			       struct drm_display_mode *mode_ptr)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
 		mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1417,8 +1421,8 @@
 {
 	struct drm_display_mode *mode_ptr;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int j, count = 0;
 	u64 tmp;
 
@@ -1483,8 +1487,7 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -1494,30 +1497,30 @@
 		goto out;
 
 	if (property == dev->mode_config.tv_left_margin_property &&
-		tv_priv->margin[TV_MARGIN_LEFT] != val) {
-		tv_priv->margin[TV_MARGIN_LEFT] = val;
+		intel_tv->margin[TV_MARGIN_LEFT] != val) {
+		intel_tv->margin[TV_MARGIN_LEFT] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_right_margin_property &&
-		tv_priv->margin[TV_MARGIN_RIGHT] != val) {
-		tv_priv->margin[TV_MARGIN_RIGHT] = val;
+		intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+		intel_tv->margin[TV_MARGIN_RIGHT] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_top_margin_property &&
-		tv_priv->margin[TV_MARGIN_TOP] != val) {
-		tv_priv->margin[TV_MARGIN_TOP] = val;
+		intel_tv->margin[TV_MARGIN_TOP] != val) {
+		intel_tv->margin[TV_MARGIN_TOP] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_bottom_margin_property &&
-		tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
-		tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+		intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_mode_property) {
 		if (val >= ARRAY_SIZE(tv_modes)) {
 			ret = -EINVAL;
 			goto out;
 		}
-		if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
+		if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
 			goto out;
 
-		tv_priv->tv_format = tv_modes[val].name;
+		intel_tv->tv_format = tv_modes[val].name;
 		changed = true;
 	} else {
 		ret = -EINVAL;
@@ -1553,16 +1556,8 @@
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_tv_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
-	.destroy = intel_tv_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 /*
@@ -1606,9 +1601,9 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
+	struct intel_tv *intel_tv;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
-	struct intel_tv_priv *tv_priv;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
 	char **tv_format_names;
 	int i, initial_mode = 0;
@@ -1647,18 +1642,18 @@
 	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
 		return;
 
-	intel_encoder = kzalloc(sizeof(struct intel_encoder) +
-			       sizeof(struct intel_tv_priv), GFP_KERNEL);
-	if (!intel_encoder) {
+	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	if (!intel_tv) {
 		return;
 	}
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_tv);
 		return;
 	}
 
+	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1668,22 +1663,20 @@
 			 DRM_MODE_ENCODER_TVDAC);
 
 	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
-	tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
 	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
-	intel_encoder->dev_priv = tv_priv;
-	tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
 	/* BIOS margin values */
-	tv_priv->margin[TV_MARGIN_LEFT] = 54;
-	tv_priv->margin[TV_MARGIN_TOP] = 36;
-	tv_priv->margin[TV_MARGIN_RIGHT] = 46;
-	tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+	intel_tv->margin[TV_MARGIN_LEFT] = 54;
+	intel_tv->margin[TV_MARGIN_TOP] = 36;
+	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-	tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
 
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1703,16 +1696,16 @@
 				   initial_mode);
 	drm_connector_attach_property(connector,
 				   dev->mode_config.tv_left_margin_property,
-				   tv_priv->margin[TV_MARGIN_LEFT]);
+				   intel_tv->margin[TV_MARGIN_LEFT]);
 	drm_connector_attach_property(connector,
 				   dev->mode_config.tv_top_margin_property,
-				   tv_priv->margin[TV_MARGIN_TOP]);
+				   intel_tv->margin[TV_MARGIN_TOP]);
 	drm_connector_attach_property(connector,
 				   dev->mode_config.tv_right_margin_property,
-				   tv_priv->margin[TV_MARGIN_RIGHT]);
+				   intel_tv->margin[TV_MARGIN_RIGHT]);
 	drm_connector_attach_property(connector,
 				   dev->mode_config.tv_bottom_margin_property,
-				   tv_priv->margin[TV_MARGIN_BOTTOM]);
+				   intel_tv->margin[TV_MARGIN_BOTTOM]);
 out:
 	drm_sysfs_connector_add(connector);
 }
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index fff8204..9ce2827f 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@
 }
 
 struct drm_ioctl_desc mga_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
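
The mga ioctl table above (and the nouveau one further down) moves from DRM_IOCTL_DEF to DRM_IOCTL_DEF_DRV, so each entry is declared with the driver-relative command name only. A rough sketch of how a macro of that kind can derive both the table slot and the command number from a single token; the names below are hypothetical and this is not the real DRM macro:

/* Hypothetical driver-relative ioctl table, for illustration only. */
struct demo_ioctl_desc {
	unsigned int nr;			/* driver-relative command number */
	int (*func)(void *dev, void *data);
	unsigned int flags;
};

#define DEMO_COMMAND_BASE	0x40
#define DEMO_NR_MGA_FLUSH	(DEMO_COMMAND_BASE + 0x01)

/* One token (e.g. MGA_FLUSH) picks both the array index and the number. */
#define DEMO_IOCTL_DEF_DRV(name, _func, _flags) \
	[DEMO_NR_##name - DEMO_COMMAND_BASE] = { DEMO_NR_##name, _func, _flags }

static int demo_flush(void *dev, void *data) { return 0; }

static const struct demo_ioctl_desc demo_ioctls[] = {
	DEMO_IOCTL_DEF_DRV(MGA_FLUSH, demo_flush, 0),
};
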
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0b69a96..e4f33a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@
 	uint32_t val = 0;
 
 	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint32_t __iomem *p =
+		uint8_t __iomem *p =
 			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
 		val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@
 	uint32_t off, uint32_t val)
 {
 	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint32_t __iomem *p =
+		uint8_t __iomem *p =
 			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
 		iowrite32(val, p + (off & ~PAGE_MASK));
@@ -4587,7 +4587,7 @@
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -1) {
@@ -4597,7 +4597,7 @@
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -2) {
@@ -4610,7 +4610,7 @@
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk > 0) {
@@ -4622,7 +4622,7 @@
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk < 0) {
@@ -4634,7 +4634,7 @@
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	}
 
@@ -5357,19 +5357,17 @@
 	}
 
 	tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
-	if (tmdstableptr == 0x0) {
+	if (!tmdstableptr) {
 		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
 		return -EINVAL;
 	}
 
+	NV_INFO(dev, "TMDS table version %d.%d\n",
+		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
 	/* nv50+ has v2.0, but we don't parse it atm */
-	if (bios->data[tmdstableptr] != 0x11) {
-		NV_WARN(dev,
-			"TMDS table revision %d.%d not currently supported\n",
-			bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+	if (bios->data[tmdstableptr] != 0x11)
 		return -ENOSYS;
-	}
 
 	/*
 	 * These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5807,22 @@
 			gpio->line = tvdac_gpio[1] >> 4;
 			gpio->invert = tvdac_gpio[0] & 2;
 		}
+	} else {
+		/*
+		 * No systematic way to store GPIO info on pre-v2.2
+		 * DCBs; try to match the PCI device IDs.
+		 */
+
+		/* Apple iMac G4 NV18 */
+		if (dev->pdev->device == 0x0189 &&
+		    dev->pdev->subsystem_vendor == 0x10de &&
+		    dev->pdev->subsystem_device == 0x0010) {
+			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+			gpio->tag = DCB_GPIO_TVDAC0;
+			gpio->line = 4;
+		}
+
 	}
 
 	if (!gpio_table_ptr)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 84f8518..f6f4477 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+	int ret;
+
+	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+		return 0;
+
+	spin_lock(&nvbo->bo.lock);
+	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+	spin_unlock(&nvbo->bo.lock);
+	return ret;
+}
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda..0480f06 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b1b22ba..a1473ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@
 	int i;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		struct nouveau_i2c_chan *i2c;
+		struct nouveau_i2c_chan *i2c = NULL;
 		struct nouveau_encoder *nv_encoder;
 		struct drm_mode_object *obj;
 		int id;
@@ -117,7 +117,9 @@
 		if (!obj)
 			continue;
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
-		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+		if (nv_encoder->dcb->i2c_index < 0xf)
+			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
 
 		if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
 			*pnv_encoder = nv_encoder;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e424bf7..1e093a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,6 +1165,7 @@
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac..79fc5ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -361,16 +361,11 @@
 
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-			spin_lock(&nvbo->bo.lock);
-			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret)) {
-				NV_ERROR(dev, "fail wait other chan\n");
-				return ret;
-			}
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail pre-validate sync\n");
+			return ret;
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@
 			return ret;
 		}
 
-		nvbo->channel = chan;
+		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false, false);
 		nvbo->channel = NULL;
@@ -390,6 +385,12 @@
 			return ret;
 		}
 
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail post-validate sync\n");
+			return ret;
+		}
+
 		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -615,6 +616,21 @@
 
 	mutex_lock(&dev->struct_mutex);
 
+	/* Mark push buffers as being used on PFIFO; the validation code
+	 * will then make sure that if the pushbuf bo moves, the move
+	 * happens on the kernel channel, which will in turn cause a sync
+	 * to happen before we try to submit the push buffer.
+	 */
+	for (i = 0; i < req->nr_push; i++) {
+		if (push[i].bo_index >= req->nr_buffers) {
+			NV_ERROR(dev, "push %d buffer not in list\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bo[push[i].bo_index].read_domains |= (1 << 31);
+	}
+
 	/* Validate buffer list */
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
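
The nouveau_gem.c hunks above reserve bit 31 of each buffer's read_domains as a private "backs a push buffer" flag: validation then clears nvbo->channel for flagged buffers so that, per the comment, any move of such a buffer goes via the kernel channel and is synced before submission. A rough standalone restatement of the marking step in plain C, with made-up names (fake_push, fake_bo_entry and PUSHBUF_FLAG are illustrative, not driver symbols):

#include <stdint.h>
#include <stdio.h>

#define PUSHBUF_FLAG (1u << 31)            /* same role as the (1 << 31) above */

struct fake_bo_entry { uint32_t read_domains; };
struct fake_push     { unsigned bo_index; };

static int mark_pushbufs(const struct fake_push *push, unsigned nr_push,
			 struct fake_bo_entry *bo, unsigned nr_buffers)
{
	unsigned i;

	for (i = 0; i < nr_push; i++) {
		if (push[i].bo_index >= nr_buffers)
			return -1;	/* push entry not in the buffer list */
		bo[push[i].bo_index].read_domains |= PUSHBUF_FLAG;
	}
	return 0;
}

int main(void)
{
	struct fake_bo_entry bo[2] = { { 0x1 }, { 0x2 } };
	struct fake_push push[1] = { { 1 } };

	if (mark_pushbufs(push, 1, bo, 2) == 0)
		printf("bo[1].read_domains = 0x%08x\n", bo[1].read_domains);
	return 0;
}
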
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 0bd407c..8461485 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@
 	if (entry->chan)
 		return -EEXIST;
 
-	if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+	if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
 		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767f..6b9187d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,6 +214,7 @@
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -239,10 +240,19 @@
 
 	dev_priv->gart_info.sg_dummy_page =
 		alloc_page(GFP_KERNEL|__GFP_DMA32);
+	if (!dev_priv->gart_info.sg_dummy_page) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -ENOMEM;
+	}
+
 	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
 	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
 			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -EFAULT;
+	}
 
 	if (dev_priv->card_type < NV_50) {
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
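
The nouveau_sgdma.c hunk adds the failure checks that were missing around the dummy-page setup: bail out with -ENOMEM if alloc_page() fails and with -EFAULT if the DMA mapping fails, deleting the already-created gpuobj in both cases. A small user-space sketch of the same check-then-undo pattern (the demo_* helpers are stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for alloc_page() / pci_map_page() / pci_dma_mapping_error(). */
static void *demo_alloc_page(void) { return malloc(4096); }
static unsigned long demo_map_page(void *p) { return p ? (unsigned long)p : 0; }
static int demo_mapping_error(unsigned long bus) { return bus == 0; }

static int setup_dummy_page(void **page, unsigned long *bus)
{
	*page = demo_alloc_page();
	if (!*page)
		return -12;		/* -ENOMEM, as in the hunk */

	*bus = demo_map_page(*page);
	if (demo_mapping_error(*bus)) {
		free(*page);		/* undo the allocation on failure */
		*page = NULL;
		return -14;		/* -EFAULT, as in the hunk */
	}
	return 0;
}

int main(void)
{
	void *page = NULL;
	unsigned long bus = 0;

	if (setup_dummy_page(&page, &bus) == 0) {
		printf("dummy page mapped at 0x%lx\n", bus);
		free(page);
	}
	return 0;
}
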
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 44fefb0..eefa5c8 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -129,6 +129,14 @@
 		return false;
 	}
 
+	/* MSI nForce2 IGP */
+	if (dev->pdev->device == 0x01f0 &&
+	    dev->pdev->subsystem_vendor == 0x1462 &&
+	    dev->pdev->subsystem_device == 0x5710) {
+		*pin_mask = 0xc;
+		return false;
+	}
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48..c95bf9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -278,7 +278,7 @@
 	/*XXX: incorrect, but needed to make hash func "work" */
 	dev_priv->ramht_offset = 0x10000;
 	dev_priv->ramht_bits   = 9;
-	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 3ab3cdc..6b451f8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,14 +142,16 @@
 nvc0_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 *buf;
 	int i;
 
 	dev_priv->susres.ramin_copy = vmalloc(65536);
 	if (!dev_priv->susres.ramin_copy)
 		return -ENOMEM;
+	buf = dev_priv->susres.ramin_copy;
 
-	for (i = 0x700000; i < 0x710000; i += 4)
-		dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+	for (i = 0; i < 65536; i += 4)
+		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
 	return 0;
 }
 
@@ -157,14 +159,15 @@
 nvc0_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 *buf = dev_priv->susres.ramin_copy;
 	u64 chan;
 	int i;
 
 	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
 	nv_wr32(dev, 0x001700, chan >> 16);
 
-	for (i = 0x700000; i < 0x710000; i += 4)
-		nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+	for (i = 0; i < 65536; i += 4)
+		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
 	vfree(dev_priv->susres.ramin_copy);
 	dev_priv->susres.ramin_copy = NULL;
 
@@ -221,7 +224,7 @@
 	/*XXX: incorrect, but needed to make hash func "work" */
 	dev_priv->ramht_offset = 0x10000;
 	dev_priv->ramht_bits   = 9;
-	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
 	return 0;
 }
 
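Two things are going on in the instmem hunks above. First, both nv50 and nvc0 now size RAMHT as (1 << ramht_bits) * 8 rather than just the entry count, presumably because ramht_size is a byte count and each entry takes 8 bytes (512 entries, 4096 bytes with 9 hash bits). Second, the nvc0 suspend/resume loops stop indexing the 64 KiB ramin_copy buffer with the absolute register offset: the old loops ran i from 0x700000 and used buf[i/4], far past a 65536-byte allocation, while the fixed loops iterate 0..65535 and add the PRAMIN base only when forming the register address. A plain-C illustration (PRAMIN_BASE below is inferred from the old loop's 0x700000 start, not taken from the driver headers):

#include <stdint.h>
#include <stdio.h>

#define PRAMIN_BASE     0x00700000u   /* where the old loop started */
#define RAMIN_COPY_SIZE 65536u        /* size of the vmalloc'd copy buffer */

int main(void)
{
	unsigned ramht_bits = 9;
	printf("ramht: %u entries, %u bytes\n",
	       1u << ramht_bits, (1u << ramht_bits) * 8);   /* 512, 4096 */

	/* Old indexing would have been far out of bounds: */
	printf("old first index %u vs %u words in the buffer\n",
	       PRAMIN_BASE / 4, RAMIN_COPY_SIZE / 4);

	/* Fixed indexing: the buffer index stays in range, and the base is
	 * only used to form the register address. */
	for (uint32_t i = 0; i < RAMIN_COPY_SIZE; i += 4) {
		uint32_t reg = PRAMIN_BASE + i;   /* register to read/write */
		uint32_t idx = i / 4;             /* in-bounds buffer index */
		(void)reg;
		(void)idx;
	}
	return 0;
}
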
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 077af1f..a9e33ce6 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1639,30 +1639,29 @@
 			r128_do_cleanup_pageflip(dev);
 	}
 }
-
 void r128_driver_lastclose(struct drm_device *dev)
 {
 	r128_do_cleanup_cce(dev);
 }
 
 struct drm_ioctl_desc r128_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
 };
 
 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 12ad512..577239a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -471,6 +471,8 @@
 	struct radeon_encoder *radeon_encoder = NULL;
 	u32 adjusted_clock = mode->clock;
 	int encoder_mode = 0;
+	u32 dp_clock = mode->clock;
+	int bpc = 8;
 
 	/* reset the pll flags */
 	pll->flags = 0;
@@ -513,6 +515,17 @@
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
 			encoder_mode = atombios_get_encoder_mode(encoder);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+				if (connector) {
+					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+					struct radeon_connector_atom_dig *dig_connector =
+						radeon_connector->con_priv;
+
+					dp_clock = dig_connector->dp_clock;
+				}
+			}
+
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -555,6 +568,14 @@
 				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v1.ucEncodeMode = encoder_mode;
+				if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+					/* may want to enable SS on DP eventually */
+					/* args.v1.ucConfig |=
+					   ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+				} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+					args.v1.ucConfig |=
+						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+				}
 
 				atom_execute_table(rdev->mode_info.atom_context,
 						   index, (uint32_t *)&args);
@@ -568,10 +589,20 @@
 				if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
-					if (encoder_mode == ATOM_ENCODER_MODE_DP)
+					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+						/* may want to enable SS on DP/eDP eventually */
+						/*args.v3.sInput.ucDispPllConfig |=
+						  DISPPLL_CONFIG_SS_ENABLE;*/
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
-					else {
+						/* 16200 or 27000 */
+						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+					} else {
+						if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+							/* deep color support */
+							args.v3.sInput.usPixelClock =
+								cpu_to_le16((mode->clock * bpc / 8) / 10);
+						}
 						if (dig->coherent_mode)
 							args.v3.sInput.ucDispPllConfig |=
 								DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +611,19 @@
 								DISPPLL_CONFIG_DUAL_LINK;
 					}
 				} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-					/* may want to enable SS on DP/eDP eventually */
-					/*args.v3.sInput.ucDispPllConfig |=
-						DISPPLL_CONFIG_SS_ENABLE;*/
-					if (encoder_mode == ATOM_ENCODER_MODE_DP)
+					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+						/* may want to enable SS on DP/eDP eventually */
+						/*args.v3.sInput.ucDispPllConfig |=
+						  DISPPLL_CONFIG_SS_ENABLE;*/
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
-					else {
+						/* 16200 or 27000 */
+						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+					} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+						/* want to enable SS on LVDS eventually */
+						/*args.v3.sInput.ucDispPllConfig |=
+						  DISPPLL_CONFIG_SS_ENABLE;*/
+					} else {
 						if (mode->clock > 165000)
 							args.v3.sInput.ucDispPllConfig |=
 								DISPPLL_CONFIG_DUAL_LINK;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 36e0d4b..4e7778d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@
 		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
 	else
 		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
-	if (dig_connector->linkb)
+	if (dig->linkb)
 		enc_id |= ATOM_DP_CONFIG_LINK_B;
 	else
 		enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb7..bd2f33e 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@
 	}
 
 	mode.mode = info.mode;
-	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	/* Chips with the AGP to PCIE bridge don't have the AGP_STATUS
+	 * register.  Just use whatever mode the host sets up.
+	 */
+	if (rdev->family <= CHIP_RV350)
+		agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	else
+		agp_status = mode.mode;
 	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
 
 	if (is_v3) {
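
The radeon_agp.c change only reads RADEON_AGP_STATUS on families up to RV350; newer chips sit behind an AGP-to-PCIE bridge, have no such register, and simply take whatever mode the host negotiated. The selection logic, restated in plain C (AGPV3_MODE_DEMO and the arguments are stand-ins for the real register bit and the RREG32 read):

#include <stdint.h>
#include <stdio.h>

#define AGPV3_MODE_DEMO 0x00000008u   /* stand-in for RADEON_AGPv3_MODE */

static uint32_t pick_agp_mode(int family_le_rv350,
			      uint32_t card_status, uint32_t host_mode)
{
	if (family_le_rv350)
		/* as before: mask the host mode with the card's status bits
		 * (with the v3 flag forced on, matching the original code) */
		return (card_status | AGPV3_MODE_DEMO) & host_mode;

	/* bridge chips: no AGP_STATUS register, trust the host mode */
	return host_mode;
}

int main(void)
{
	printf("old path: 0x%08x\n", pick_agp_mode(1, 0x00000a02u, 0x00000a0bu));
	printf("new path: 0x%08x\n", pick_agp_mode(0, 0x00000000u, 0x00000a0bu));
	return 0;
}
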
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f..a21bf88 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = NULL,
 	.set_surface_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6d30868..61141981 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-		      uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			uint32_t supported_device);
 
 /* from radeon_connector.c */
@@ -46,14 +46,14 @@
 			  uint32_t supported_device,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
-			  bool linkb, uint32_t igp_lane_info,
+			  uint32_t igp_lane_info,
 			  uint16_t connector_object_id,
 			  struct radeon_hpd *hpd,
 			  struct radeon_router *router);
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 union atom_supported_devices {
@@ -226,6 +226,8 @@
 	struct radeon_hpd hpd;
 	u32 reg;
 
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
 	if (ASIC_IS_DCE4(rdev))
 		reg = EVERGREEN_DC_GPIO_HPD_A;
 	else
@@ -477,7 +479,6 @@
 	int i, j, k, path_size, device_support;
 	int connector_type;
 	u16 igp_lane_info, conn_id, connector_object_id;
-	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
 	struct radeon_router router;
 	struct radeon_gpio_rec gpio;
@@ -510,7 +511,7 @@
 		addr += path_size;
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
-		linkb = false;
+
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -601,13 +602,10 @@
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-					if (grph_obj_num == 2)
-						linkb = true;
-					else
-						linkb = false;
+					u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
 
 					radeon_add_atom_encoder(dev,
-								grph_obj_id,
+								encoder_obj,
 								le16_to_cpu
 								(path->
 								 usDeviceTag));
@@ -744,7 +742,7 @@
 						  le16_to_cpu(path->
 							      usDeviceTag),
 						  connector_type, &ddc_bus,
-						  linkb, igp_lane_info,
+						  igp_lane_info,
 						  connector_object_id,
 						  &hpd,
 						  &router);
@@ -933,13 +931,13 @@
 
 		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
 			radeon_add_atom_encoder(dev,
-						radeon_get_encoder_id(dev,
+						radeon_get_encoder_enum(dev,
 								      (1 << i),
 								      dac),
 						(1 << i));
 		else
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									(1 << i),
 									dac),
 						  (1 << i));
@@ -996,7 +994,7 @@
 						  bios_connectors[i].
 						  connector_type,
 						  &bios_connectors[i].ddc_bus,
-						  false, 0,
+						  0,
 						  connector_object_id,
 						  &bios_connectors[i].hpd,
 						  &router);
@@ -1183,7 +1181,7 @@
 				return true;
 			break;
 		case 2:
-			if (igp_info->info_2.ucMemoryType & 0x0f)
+			if (igp_info->info_2.ulBootUpSidePortClock)
 				return true;
 			break;
 		default:
@@ -1305,6 +1303,7 @@
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -1368,6 +1367,12 @@
 		}
 
 		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
 	}
 	return lvds;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 885dcfa..bd74e42 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-		      uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 
 /* from radeon_connector.c */
@@ -55,7 +55,7 @@
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 /* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@
 			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
 									1),
 						  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@
 			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
 			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_LCD1_SUPPORT,
 									0),
 						  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@
 			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
 									1),
 						  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@
 			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 			hpd.hpd = RADEON_HPD_1;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_DFP1_SUPPORT,
 									0),
 						  ATOM_DEVICE_DFP1_SUPPORT);
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT2_SUPPORT,
 									2),
 						  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@
 			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
 									1),
 						  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@
 			ddc_i2c.valid = false;
 			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_TV1_SUPPORT,
 									2),
 						  ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@
 				else
 					devices = ATOM_DEVICE_DFP1_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev, devices, 0),
 							  devices);
 				radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@
 				if (tmp & 0x1) {
 					devices = ATOM_DEVICE_CRT2_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_CRT2_SUPPORT,
 								   2),
@@ -2131,7 +2131,7 @@
 				} else {
 					devices = ATOM_DEVICE_CRT1_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_CRT1_SUPPORT,
 								   1),
@@ -2151,7 +2151,7 @@
 				if (tmp & 0x1) {
 					devices |= ATOM_DEVICE_CRT2_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_CRT2_SUPPORT,
 								   2),
@@ -2159,7 +2159,7 @@
 				} else {
 					devices |= ATOM_DEVICE_CRT1_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_CRT1_SUPPORT,
 								   1),
@@ -2168,7 +2168,7 @@
 				if ((tmp >> 4) & 0x1) {
 					devices |= ATOM_DEVICE_DFP2_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_DFP2_SUPPORT,
 								   0),
@@ -2177,7 +2177,7 @@
 				} else {
 					devices |= ATOM_DEVICE_DFP1_SUPPORT;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_DFP1_SUPPORT,
 								   0),
@@ -2202,7 +2202,7 @@
 					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
 				}
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev, devices, 0),
 							  devices);
 				radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@
 			case CONNECTOR_CTV_LEGACY:
 			case CONNECTOR_STV_LEGACY:
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							   ATOM_DEVICE_TV1_SUPPORT,
 							   2),
@@ -2242,12 +2242,12 @@
 			DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
 
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
 									1),
 						  ATOM_DEVICE_CRT1_SUPPORT);
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_DFP1_SUPPORT,
 									0),
 						  ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@
 			DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
 			if (crt_info) {
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id(dev,
+							  radeon_get_encoder_enum(dev,
 										ATOM_DEVICE_CRT1_SUPPORT,
 										1),
 							  ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@
 						     COMBIOS_LCD_DDC_INFO_TABLE);
 
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_LCD1_SUPPORT,
 									0),
 						  ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@
 					hpd.hpd = RADEON_HPD_NONE;
 					ddc_i2c.valid = false;
 					radeon_add_legacy_encoder(dev,
-								  radeon_get_encoder_id
+								  radeon_get_encoder_enum
 								  (dev,
 								   ATOM_DEVICE_TV1_SUPPORT,
 								   2),
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47c4b27..31a09cd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -977,24 +977,25 @@
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
-	u8 sink_type;
 
 	if (radeon_connector->edid) {
 		kfree(radeon_connector->edid);
 		radeon_connector->edid = NULL;
 	}
 
-	sink_type = radeon_dp_getsinktype(radeon_connector);
-	if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-	    (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-		if (radeon_dp_getdpcd(radeon_connector)) {
-			radeon_dig_connector->dp_sink_type = sink_type;
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		/* eDP is always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (radeon_dp_getdpcd(radeon_connector))
 			ret = connector_status_connected;
-		}
 	} else {
-		if (radeon_ddc_probe(radeon_connector)) {
-			radeon_dig_connector->dp_sink_type = sink_type;
-			ret = connector_status_connected;
+		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			if (radeon_dp_getdpcd(radeon_connector))
+				ret = connector_status_connected;
+		} else {
+			if (radeon_ddc_probe(radeon_connector))
+				ret = connector_status_connected;
 		}
 	}
 
@@ -1037,7 +1038,6 @@
 			  uint32_t supported_device,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
-			  bool linkb,
 			  uint32_t igp_lane_info,
 			  uint16_t connector_object_id,
 			  struct radeon_hpd *hpd,
@@ -1128,7 +1128,6 @@
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
-		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1157,6 @@
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
-		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1180,6 @@
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
-		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1229,7 +1226,6 @@
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
-		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
 		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4f7a170..69b3c22 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@
 		mc->mc_vram_size = mc->aper_size;
 	}
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
 		mc->real_vram_size = mc->aper_size;
 		mc->mc_vram_size = mc->aper_size;
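
The radeon_device.c one-liner fixes the AGP VRAM/GTT overlap check: the old condition (vram_end > gtt_start && vram_end <= gtt_end) only fired when the end of VRAM fell inside the GTT window, so a VRAM range that completely covered the window slipped through. The new form is the usual interval-overlap test. A tiny standalone check of that predicate:

#include <stdint.h>
#include <stdio.h>

/* Overlap test as in the fixed line: [vs, ve] vs [gs, ge], inclusive ends. */
static int ranges_overlap(uint64_t vs, uint64_t ve, uint64_t gs, uint64_t ge)
{
	return ve > gs && vs <= ge;
}

int main(void)
{
	/* VRAM completely covers the GTT window: the old test missed this. */
	uint64_t vs = 0x00000000, ve = 0xffffffffu;
	uint64_t gs = 0x40000000, ge = 0x7fffffffu;

	printf("overlap=%d\n", ranges_overlap(vs, ve, gs, ge));  /* prints 1 */
	return 0;
}
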
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5764f4d..6dd434a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1094,6 +1094,18 @@
 	radeon_i2c_fini(rdev);
 }
 
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+	/* try and guess if this is a tv or a monitor */
+	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+	    (mode->vdisplay == 576) || /* 576p */
+	    (mode->vdisplay == 720) || /* 720p */
+	    (mode->vdisplay == 1080)) /* 1080p */
+		return true;
+	else
+		return false;
+}
+
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -1141,7 +1153,8 @@
 			if (ASIC_IS_AVIVO(rdev) &&
 			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
 			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
-			      drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+			      drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			      is_hdtv_mode(mode)))) {
 				radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
 				radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
 				radeon_crtc->rmx_type = RMX_FULL;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 263c809..2c293e8 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@
 }
 
 uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	uint32_t ret = 0;
@@ -97,59 +97,59 @@
 			if ((rdev->family == CHIP_RS300) ||
 			    (rdev->family == CHIP_RS400) ||
 			    (rdev->family == CHIP_RS480))
-				ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
 			else if (ASIC_IS_AVIVO(rdev))
-				ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+				ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
 			else
-				ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+				ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
 			break;
 		case 2: /* dac b */
 			if (ASIC_IS_AVIVO(rdev))
-				ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+				ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
 			else {
 				/*if (rdev->family == CHIP_R200)
-				  ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+				  ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
 				  else*/
-				ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
 			}
 			break;
 		case 3: /* external dac */
 			if (ASIC_IS_AVIVO(rdev))
-				ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+				ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
 			else
-				ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+				ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
 			break;
 		}
 		break;
 	case ATOM_DEVICE_LCD1_SUPPORT:
 		if (ASIC_IS_AVIVO(rdev))
-			ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+			ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
 		else
-			ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+			ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
 		break;
 	case ATOM_DEVICE_DFP1_SUPPORT:
 		if ((rdev->family == CHIP_RS300) ||
 		    (rdev->family == CHIP_RS400) ||
 		    (rdev->family == CHIP_RS480))
-			ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
 		else if (ASIC_IS_AVIVO(rdev))
-			ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+			ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
 		else
-			ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+			ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
 		break;
 	case ATOM_DEVICE_LCD2_SUPPORT:
 	case ATOM_DEVICE_DFP2_SUPPORT:
 		if ((rdev->family == CHIP_RS600) ||
 		    (rdev->family == CHIP_RS690) ||
 		    (rdev->family == CHIP_RS740))
-			ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+			ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
 		else if (ASIC_IS_AVIVO(rdev))
-			ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+			ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
 		else
-			ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
 		break;
 	case ATOM_DEVICE_DFP3_SUPPORT:
-		ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+		ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
 		break;
 	}
 
@@ -228,32 +228,6 @@
 	return NULL;
 }
 
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector;
-	struct radeon_connector_atom_dig *dig_connector;
-
-	if (!rdev->is_atom_bios)
-		return NULL;
-
-	connector = radeon_get_connector_for_encoder(encoder);
-	if (!connector)
-		return NULL;
-
-	radeon_connector = to_radeon_connector(connector);
-
-	if (!radeon_connector->con_priv)
-		return NULL;
-
-	dig_connector = radeon_connector->con_priv;
-
-	return dig_connector;
-}
-
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
@@ -512,14 +486,12 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct radeon_connector_atom_dig *dig_connector =
-		radeon_get_atom_connector_priv_from_encoder(encoder);
 	union lvds_encoder_control args;
 	int index = 0;
 	int hdmi_detected = 0;
 	uint8_t frev, crev;
 
-	if (!dig || !dig_connector)
+	if (!dig)
 		return;
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@
 				if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
 					args.v1.ucMisc |= (1 << 1);
 			} else {
-				if (dig_connector->linkb)
+				if (dig->linkb)
 					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
 				if (radeon_encoder->pixel_clock > 165000)
 					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@
 						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
 				}
 			} else {
-				if (dig_connector->linkb)
+				if (dig->linkb)
 					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
 				if (radeon_encoder->pixel_clock > 165000)
 					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@
 int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
-			return ATOM_ENCODER_MODE_HDMI;
-		else if (radeon_connector->use_digital)
+		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else if (radeon_connector->use_digital)
 			return ATOM_ENCODER_MODE_DVI;
 		else
 			return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
-			return ATOM_ENCODER_MODE_HDMI;
-		else
+		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else
 			return ATOM_ENCODER_MODE_DVI;
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
-			return ATOM_ENCODER_MODE_HDMI;
-		else
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else
 			return ATOM_ENCODER_MODE_DVI;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct radeon_connector_atom_dig *dig_connector =
-		radeon_get_atom_connector_priv_from_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	union dig_encoder_control args;
 	int index = 0;
 	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
 
-	if (!dig || !dig_connector)
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
 		return;
 
 	memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@
 	args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
 	if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-		if (dig_connector->dp_clock == 270000)
+		if (dp_clock == 270000)
 			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-		args.v1.ucLaneNum = dig_connector->dp_lane_count;
+		args.v1.ucLaneNum = dp_lane_count;
 	} else if (radeon_encoder->pixel_clock > 165000)
 		args.v1.ucLaneNum = 8;
 	else
@@ -781,7 +778,7 @@
 			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
 			break;
 		}
-		if (dig_connector->linkb)
+		if (dig->linkb)
 			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
 		else
 			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct radeon_connector_atom_dig *dig_connector =
-		radeon_get_atom_connector_priv_from_encoder(encoder);
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	union dig_transmitter_control args;
 	int index = 0;
 	uint8_t frev, crev;
 	bool is_dp = false;
 	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
 
-	if (!dig || !dig_connector)
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		igp_lane_info = dig_connector->igp_lane_info;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
 		return;
 
-	connector = radeon_get_connector_for_encoder(encoder);
-	radeon_connector = to_radeon_connector(connector);
-
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
 		is_dp = true;
 
 	memset(&args, 0, sizeof(args));
 
-	if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
-	else {
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
-			break;
-		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
 	}
 
 	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = radeon_connector->connector_object_id;
+		args.v1.usInitInfo = connector_object_id;
 	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
 		args.v1.asMode.ucLaneSel = lane_num;
 		args.v1.asMode.ucLaneSet = lane_set;
 	} else {
 		if (is_dp)
 			args.v1.usPixelClock =
-				cpu_to_le16(dig_connector->dp_clock / 10);
+				cpu_to_le16(dp_clock / 10);
 		else if (radeon_encoder->pixel_clock > 165000)
 			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 		else
@@ -858,13 +864,13 @@
 	}
 	if (ASIC_IS_DCE4(rdev)) {
 		if (is_dp)
-			args.v3.ucLaneNum = dig_connector->dp_lane_count;
+			args.v3.ucLaneNum = dp_lane_count;
 		else if (radeon_encoder->pixel_clock > 165000)
 			args.v3.ucLaneNum = 8;
 		else
 			args.v3.ucLaneNum = 4;
 
-		if (dig_connector->linkb) {
+		if (dig->linkb) {
 			args.v3.acConfig.ucLinkSel = 1;
 			args.v3.acConfig.ucEncoderSel = 1;
 		}
@@ -904,7 +910,7 @@
 		}
 	} else if (ASIC_IS_DCE32(rdev)) {
 		args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
-		if (dig_connector->linkb)
+		if (dig->linkb)
 			args.v2.acConfig.ucLinkSel = 1;
 
 		switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@
 		if ((rdev->flags & RADEON_IS_IGP) &&
 		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
 			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
-				if (dig_connector->igp_lane_info & 0x1)
+				if (igp_lane_info & 0x1)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				else if (dig_connector->igp_lane_info & 0x2)
+				else if (igp_lane_info & 0x2)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
-				else if (dig_connector->igp_lane_info & 0x4)
+				else if (igp_lane_info & 0x4)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
-				else if (dig_connector->igp_lane_info & 0x8)
+				else if (igp_lane_info & 0x8)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
 			} else {
-				if (dig_connector->igp_lane_info & 0x3)
+				if (igp_lane_info & 0x3)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
-				else if (dig_connector->igp_lane_info & 0xc)
+				else if (igp_lane_info & 0xc)
 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
 			}
 		}
 
-		if (dig_connector->linkb)
+		if (dig->linkb)
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
 		else
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			if (!ASIC_IS_DCE4(rdev))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -1085,8 +1090,7 @@
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			if (!ASIC_IS_DCE4(rdev))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
 				if (ASIC_IS_DCE4(rdev))
 					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@
 	uint32_t dig_enc_in_use = 0;
 
 	if (ASIC_IS_DCE4(rdev)) {
-		struct radeon_connector_atom_dig *dig_connector =
-			radeon_get_atom_connector_priv_from_encoder(encoder);
-
+		dig = radeon_encoder->enc_priv;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			if (dig_connector->linkb)
+			if (dig->linkb)
 				return 1;
 			else
 				return 0;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			if (dig_connector->linkb)
+			if (dig->linkb)
 				return 3;
 			else
 				return 2;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			if (dig_connector->linkb)
+			if (dig->linkb)
 				return 5;
 			else
 				return 4;
@@ -1641,6 +1643,7 @@
 struct radeon_encoder_atom_dig *
 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 {
+	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 	struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
 	if (!dig)
@@ -1650,11 +1653,16 @@
 	dig->coherent_mode = true;
 	dig->dig_encoder = -1;
 
+	if (encoder_enum == 2)
+		dig->linkb = true;
+	else
+		dig->linkb = false;
+
 	return dig;
 }
 
 void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@
 	/* see if we already added it */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		radeon_encoder = to_radeon_encoder(encoder);
-		if (radeon_encoder->encoder_id == encoder_id) {
+		if (radeon_encoder->encoder_enum == encoder_enum) {
 			radeon_encoder->devices |= supported_device;
 			return;
 		}
@@ -1691,7 +1699,8 @@
 
 	radeon_encoder->enc_priv = NULL;
 
-	radeon_encoder->encoder_id = encoder_id;
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
 	radeon_encoder->devices = supported_device;
 	radeon_encoder->rmx_type = RMX_OFF;
 	radeon_encoder->underscan_type = UNDERSCAN_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dbf8696..c74a8b2 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
-				       false, ttm_bo_type_kernel,
+				       false, true,
 				       &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index bfd2ce5..0416804 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,6 +99,13 @@
 		}
 	}
 
+	/* switch the pads to ddc mode */
+	if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+		temp = RREG32(rec->mask_clk_reg);
+		temp &= ~(1 << 16);
+		WREG32(rec->mask_clk_reg, temp);
+	}
+
 	/* clear the output pin values */
 	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
 	WREG32(rec->a_clk_reg, temp);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4..a108c7e 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@
 	 * chips.  Disable MSI on them for now.
 	 */
 	if ((rdev->family >= CHIP_RV380) &&
-	    (!(rdev->flags & RADEON_IS_IGP))) {
+	    (!(rdev->flags & RADEON_IS_IGP)) &&
+	    (!(rdev->flags & RADEON_IS_AGP))) {
 		int ret = pci_enable_msi(rdev->pdev);
 		if (!ret) {
 			rdev->msi_enabled = 1;
-			DRM_INFO("radeon: using MSI.\n");
+			dev_info(rdev->dev, "radeon: using MSI.\n");
 		}
 	}
 	rdev->irq.installed = true;
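
The radeon_irq_kms.c hunk adds RADEON_IS_AGP to the MSI exclusion list, so MSI is now only enabled on RV380-or-newer parts that are neither IGP nor AGP, and the enable message switches from DRM_INFO to the device-scoped dev_info. The predicate, restated with demo flag bits (values made up, only the logic matches):

#include <stdio.h>

#define IS_IGP (1u << 0)   /* demo flag bits, not the driver's values */
#define IS_AGP (1u << 1)

static int msi_ok(int family_ge_rv380, unsigned flags)
{
	return family_ge_rv380 && !(flags & IS_IGP) && !(flags & IS_AGP);
}

int main(void)
{
	printf("PCIE dGPU: %d\n", msi_ok(1, 0));		/* 1: MSI enabled */
	printf("AGP card : %d\n", msi_ok(1, IS_AGP));	/* 0: now excluded */
	printf("IGP      : %d\n", msi_ok(1, IS_IGP));	/* 0: already excluded */
	return 0;
}
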
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b1c8ace..5eee3c4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,6 +161,7 @@
 			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
 			return -EINVAL;
 		}
+		break;
 	case RADEON_INFO_WANT_HYPERZ:
 		/* The "value" here is both an input and output parameter.
 		 * If the input value is 1, filp requests hyper-z access.
@@ -323,45 +324,45 @@
 
 
 struct drm_ioctl_desc radeon_ioctls_kms[] = {
-	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
 	/* KMS */
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
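
The ioctl tables above (and the savage, sis, via and vmwgfx tables later in this diff) switch from DRM_IOCTL_DEF to DRM_IOCTL_DEF_DRV, which takes the driver-local command name and pastes the DRM_ prefixes on internally; the VMW_IOCTL_DEF change further down shows the idea, indexing each entry by the command's offset from the command base. A rough, self-contained illustration of that designated-initializer dispatch pattern (hypothetical macros and handlers, not the real DRM definitions):

    #include <stdio.h>

    /* hypothetical driver-private command numbers (offsets from a command base) */
    #define CMD_CP_INIT	0x00
    #define CMD_CP_START	0x01
    #define CMD_SWAP	0x02

    struct ioctl_desc {
    	int (*func)(void *data);
    	unsigned int flags;
    };

    /* build a table entry indexed by the command number, in the spirit of DRM_IOCTL_DEF_DRV */
    #define IOCTL_DEF_DRV(cmd, handler, fl) \
    	[CMD_##cmd] = { .func = handler, .flags = fl }

    static int cp_init(void *data)  { puts("cp_init");  return 0; }
    static int cp_start(void *data) { puts("cp_start"); return 0; }
    static int swap_buf(void *data) { puts("swap");     return 0; }

    static const struct ioctl_desc ioctls[] = {
    	IOCTL_DEF_DRV(CP_INIT,  cp_init,  0x1),
    	IOCTL_DEF_DRV(CP_START, cp_start, 0x1),
    	IOCTL_DEF_DRV(SWAP,     swap_buf, 0x0),
    };

    int main(void)
    {
    	unsigned int nr = CMD_CP_START;	/* pretend this came from the ioctl number */

    	if (nr < sizeof(ioctls) / sizeof(ioctls[0]) && ioctls[nr].func)
    		return ioctls[nr].func(NULL);
    	return -1;
    }
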
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 989df51..305049a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@
 	if (!ref_div)
 		return 1;
 
-	vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
 
 	/*
 	 * This is horribly crude: the VCO frequency range is divided into
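
The one-character change above replaces a bitwise AND with the intended multiplication in the VCO frequency calculation. A throwaway comparison with made-up values (ref_freq, fb_div and ref_div are hypothetical here, not taken from real hardware) shows how far off the old expression was:

    #include <stdio.h>

    int main(void)
    {
    	unsigned ref_freq = 2700;	/* hypothetical reference clock value */
    	unsigned fb_div = 200, ref_div = 12;

    	unsigned wrong = (ref_freq & fb_div) / ref_div;	/* 2700 & 200 = 136, / 12 = 11 */
    	unsigned right = (ref_freq * fb_div) / ref_div;	/* 2700 * 200 = 540000, / 12 = 45000 */

    	printf("wrong vcoFreq = %u, right vcoFreq = %u\n", wrong, right);
    	return 0;
    }
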
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b8149cb..0b83970 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@
 }
 
 void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@
 	/* see if we already added it */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		radeon_encoder = to_radeon_encoder(encoder);
-		if (radeon_encoder->encoder_id == encoder_id) {
+		if (radeon_encoder->encoder_enum == encoder_enum) {
 			radeon_encoder->devices |= supported_device;
 			return;
 		}
@@ -1374,7 +1374,8 @@
 
 	radeon_encoder->enc_priv = NULL;
 
-	radeon_encoder->encoder_id = encoder_id;
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
 	radeon_encoder->devices = supported_device;
 	radeon_encoder->rmx_type = RMX_OFF;
 
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5bbc086..8f93e2b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -342,6 +342,7 @@
 };
 
 struct radeon_encoder_atom_dig {
+	bool linkb;
 	/* atom dig */
 	bool coherent_mode;
 	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +361,7 @@
 
 struct radeon_encoder {
 	struct drm_encoder base;
+	uint32_t encoder_enum;
 	uint32_t encoder_id;
 	uint32_t devices;
 	uint32_t active_device;
@@ -378,7 +380,6 @@
 
 struct radeon_connector_atom_dig {
 	uint32_t igp_lane_info;
-	bool linkb;
 	/* displayport */
 	struct radeon_i2c_chan *dp_i2c_bus;
 	u8 dpcd[8];
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 58038f5c..477ba67 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,6 +226,11 @@
 {
 	int i;
 
+	/* no need to take locks, etc. if nothing's going to change */
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->vram_mutex);
 	mutex_lock(&rdev->cp.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0..4ae5a3d 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@
 }
 
 struct drm_ioctl_desc radeon_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
 };
 
 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 976dc8d..bf5f83e 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@
 }
 
 struct drm_ioctl_desc savage_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 07d0f29..7fe2b63 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@
 }
 
 struct drm_ioctl_desc sis_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 };
 
 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 68dda74..cc0ffa9 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@
 }
 
 struct drm_ioctl_desc via_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
 };
 
 int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9dd395b..72ec2e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
  */
 
 #define VMW_IOCTL_DEF(ioctl, func, flags) \
-	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
 
 /**
  * Ioctl definitions.
  */
 
 static struct drm_ioctl_desc vmw_ioctls[] = {
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+	VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
 		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
 };
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e635199..0c52899 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1299,6 +1299,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index f44bdc0..8ca7f65 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -159,6 +159,13 @@
 {
 	struct egalax_data *td = hid_get_drvdata(hid);
 
+	/* Note, eGalax has two product lines: the first is resistive and
+	 * uses a standard parallel multitouch protocol (product ID ==
+	 * 48xx).  The second is capacitive and uses an unusual "serial"
+	 * protocol with a different message for each multitouch finger
+	 * (product ID == 72xx).  We do not yet generate a correct event
+	 * sequence for the capacitive/serial protocol.
+	 */
 	if (hid->claimed & HID_CLAIMED_INPUT) {
 		struct input_dev *input = field->hidinput->input;
 
@@ -246,6 +253,8 @@
 static const struct hid_device_id egalax_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
 			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, egalax_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d3fc13a..85c6d13 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -188,6 +188,7 @@
 #define USB_VENDOR_ID_DWAV		0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH	0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1	0x720c
 
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 346f0e3..bc2e077 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@
 	ref_cnt--;
 	mutex_lock(&info->lock);
 	(*ref_cnt)--;
-	may_release = !ref_cnt;
+	may_release = !*ref_cnt;
 	mutex_unlock(&info->lock);
 	if (may_release) {
-		framebuffer_release(info);
 		vfree((u8 *)info->fix.smem_start);
+		framebuffer_release(info);
 	}
 }
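
Two problems are fixed in the hunk above: the pointer rather than the dereferenced count was tested, and framebuffer_release() freed the info structure before info->fix.smem_start was read for vfree(), a use-after-free. A small userspace sketch of the corrected ordering (generic names, not the fbdev API):

    #include <stdlib.h>

    struct fb_info {
    	int ref_cnt;
    	void *smem;	/* separately allocated framebuffer memory */
    };

    static void fb_put(struct fb_info *info)
    {
    	int may_release;

    	info->ref_cnt--;
    	may_release = !info->ref_cnt;	/* test the count, not the pointer */
    	if (may_release) {
    		free(info->smem);	/* read and free members first ... */
    		free(info);		/* ... then release the structure itself */
    	}
    }

    int main(void)
    {
    	struct fb_info *info = malloc(sizeof(*info));

    	info->ref_cnt = 1;
    	info->smem = malloc(4096);
    	fb_put(info);
    	return 0;
    }
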
 
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 254a003..0a29c51 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -266,13 +266,15 @@
 {
 	struct hiddev_list *list;
 	struct usb_interface *intf;
+	struct hid_device *hid;
 	struct hiddev *hiddev;
 	int res;
 
 	intf = usb_find_interface(&hiddev_driver, iminor(inode));
 	if (!intf)
 		return -ENODEV;
-	hiddev = usb_get_intfdata(intf);
+	hid = usb_get_intfdata(intf);
+	hiddev = hid->hiddev;
 
 	if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
 		return -ENOMEM;
@@ -587,7 +589,7 @@
 	struct hiddev_list *list = file->private_data;
 	struct hiddev *hiddev = list->hiddev;
 	struct hid_device *hid = hiddev->hid;
-	struct usb_device *dev = hid_to_usb_dev(hid);
+	struct usb_device *dev;
 	struct hiddev_collection_info cinfo;
 	struct hiddev_report_info rinfo;
 	struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@
 	/* Called without BKL by compat methods so no BKL taken */
 
 	/* FIXME: Who or what stops this from racing with a disconnect? */
-	if (!hiddev->exist)
+	if (!hiddev->exist || !hid)
 		return -EIO;
 
+	dev = hid_to_usb_dev(hid);
+
 	switch (cmd) {
 
 	case HIDIOCGVERSION:
@@ -888,7 +892,6 @@
 	hid->hiddev = hiddev;
 	hiddev->hid = hid;
 	hiddev->exist = 1;
-	usb_set_intfdata(usbhid->intf, usbhid);
 	retval = usb_register_dev(usbhid->intf, &hiddev_class);
 	if (retval) {
 		err_hid("Not able to get a minor for this device.");
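
The hiddev changes above follow from the interface data no longer being set to usbhid: usb_get_intfdata() now returns the hid_device, so hiddev is reached through hid->hiddev, and hid_to_usb_dev() is only called after the exist/NULL check. A generic sketch of deferring a derived pointer until after the validity checks (hypothetical types and names):

    #include <stdio.h>

    struct device { const char *name; };
    struct widget { int exists; struct device *parent; };

    static int widget_ioctl(struct widget *w)
    {
    	struct device *dev;

    	/* don't derive dependent pointers before checking validity */
    	if (!w || !w->exists || !w->parent)
    		return -1;

    	dev = w->parent;
    	printf("ioctl on %s\n", dev->name);
    	return 0;
    }

    int main(void)
    {
    	struct device d = { "dev0" };
    	struct widget w = { 1, &d };

    	return widget_ioctl(&w);
    }
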
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0fba829..4d4d09b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -332,11 +332,11 @@
 	  will be called f71805f.
 
 config SENSORS_F71882FG
-	tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000"
+	tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
 	depends on EXPERIMENTAL
 	help
-	  If you say yes here you get support for hardware monitoring features
-	  of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
+	  If you say yes here you get support for hardware monitoring
+	  features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
 	  F71889FG and F8000 Super-I/O chips.
 
 	  This driver can also be built as a module.  If so, the module
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6207120..537841e 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -45,7 +45,6 @@
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
-#define SIO_F71808_ID		0x0901  /* Chipset ID */
 #define SIO_F71858_ID		0x0507  /* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
@@ -97,10 +96,9 @@
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
 
 static const char *f71882fg_names[] = {
-	"f71808fg",
 	"f71858fg",
 	"f71862fg",
 	"f71882fg",
@@ -308,8 +306,8 @@
 	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
 };
 
-/* In attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
 	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
 	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
 	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -319,22 +317,6 @@
 	SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
 	SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
 	SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* In attr for the f71808fg */
-static struct sensor_device_attribute_2 f71808_in_attr[] = {
-	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
-	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
-	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
-	SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
-	SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
-	SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
-	SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
-	SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
-};
-
-/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
 	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
 	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
 		store_temp_max, 0, 1),
@@ -373,10 +355,6 @@
 		store_temp_beep, 0, 6),
 	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
 	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
-};
-
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 f71862_temp_attr[] = {
 	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
 	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
 		store_temp_max, 0, 3),
@@ -1011,11 +989,6 @@
 				data->temp_type[1] = 6;
 				break;
 			}
-		} else if (data->type == f71808fg) {
-			reg  = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
-			data->temp_type[1] = (reg & 0x02) ? 2 : 4;
-			data->temp_type[2] = (reg & 0x04) ? 2 : 4;
-
 		} else {
 			reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
 			if ((reg2 & 0x03) == 0x01)
@@ -1898,8 +1871,7 @@
 
 	val /= 1000;
 
-	if (data->type == f71889fg
-	 || data->type == f71808fg)
+	if (data->type == f71889fg)
 		val = SENSORS_LIMIT(val, -128, 127);
 	else
 		val = SENSORS_LIMIT(val, 0, 127);
@@ -2002,28 +1974,8 @@
 			/* fall through! */
 		case f71862fg:
 			err = f71882fg_create_sysfs_files(pdev,
-					f71862_temp_attr,
-					ARRAY_SIZE(f71862_temp_attr));
-			if (err)
-				goto exit_unregister_sysfs;
-			err = f71882fg_create_sysfs_files(pdev,
-					fxxxx_in_attr,
-					ARRAY_SIZE(fxxxx_in_attr));
-			if (err)
-				goto exit_unregister_sysfs;
-			err = f71882fg_create_sysfs_files(pdev,
-					fxxxx_temp_attr,
-					ARRAY_SIZE(fxxxx_temp_attr));
-			break;
-		case f71808fg:
-			err = f71882fg_create_sysfs_files(pdev,
-					f71808_in_attr,
-					ARRAY_SIZE(f71808_in_attr));
-			if (err)
-				goto exit_unregister_sysfs;
-			err = f71882fg_create_sysfs_files(pdev,
-					fxxxx_temp_attr,
-					ARRAY_SIZE(fxxxx_temp_attr));
+					fxxxx_in_temp_attr,
+					ARRAY_SIZE(fxxxx_in_temp_attr));
 			break;
 		case f8000:
 			err = f71882fg_create_sysfs_files(pdev,
@@ -2050,7 +2002,6 @@
 		case f71862fg:
 			err = (data->pwm_enable & 0x15) != 0x15;
 			break;
-		case f71808fg:
 		case f71882fg:
 		case f71889fg:
 			err = 0;
@@ -2096,7 +2047,6 @@
 					f8000_auto_pwm_attr,
 					ARRAY_SIZE(f8000_auto_pwm_attr));
 			break;
-		case f71808fg:
 		case f71889fg:
 			for (i = 0; i < nr_fans; i++) {
 				data->pwm_auto_point_mapping[i] =
@@ -2176,22 +2126,8 @@
 			/* fall through! */
 		case f71862fg:
 			f71882fg_remove_sysfs_files(pdev,
-					f71862_temp_attr,
-					ARRAY_SIZE(f71862_temp_attr));
-			f71882fg_remove_sysfs_files(pdev,
-					fxxxx_in_attr,
-					ARRAY_SIZE(fxxxx_in_attr));
-			f71882fg_remove_sysfs_files(pdev,
-					fxxxx_temp_attr,
-					ARRAY_SIZE(fxxxx_temp_attr));
-			break;
-		case f71808fg:
-			f71882fg_remove_sysfs_files(pdev,
-					f71808_in_attr,
-					ARRAY_SIZE(f71808_in_attr));
-			f71882fg_remove_sysfs_files(pdev,
-					fxxxx_temp_attr,
-					ARRAY_SIZE(fxxxx_temp_attr));
+					fxxxx_in_temp_attr,
+					ARRAY_SIZE(fxxxx_in_temp_attr));
 			break;
 		case f8000:
 			f71882fg_remove_sysfs_files(pdev,
@@ -2259,9 +2195,6 @@
 
 	devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
 	switch (devid) {
-	case SIO_F71808_ID:
-		sio_data->type = f71808fg;
-		break;
 	case SIO_F71858_ID:
 		sio_data->type = f71858fg;
 		break;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 11567c7..c148b63 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2136,16 +2136,6 @@
 	 * with the rest of the array)
 	 */
 	mdk_rdev_t *rdev;
-
-	/* First make sure individual recovery_offsets are correct */
-	list_for_each_entry(rdev, &mddev->disks, same_set) {
-		if (rdev->raid_disk >= 0 &&
-		    mddev->delta_disks >= 0 &&
-		    !test_bit(In_sync, &rdev->flags) &&
-		    mddev->curr_resync_completed > rdev->recovery_offset)
-				rdev->recovery_offset = mddev->curr_resync_completed;
-
-	}	
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (rdev->sb_events == mddev->events ||
 		    (nospares &&
@@ -2167,12 +2157,27 @@
 	int sync_req;
 	int nospares = 0;
 
-	mddev->utime = get_seconds();
-	if (mddev->external)
-		return;
 repeat:
+	/* First make sure individual recovery_offsets are correct */
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
+		if (rdev->raid_disk >= 0 &&
+		    mddev->delta_disks >= 0 &&
+		    !test_bit(In_sync, &rdev->flags) &&
+		    mddev->curr_resync_completed > rdev->recovery_offset)
+				rdev->recovery_offset = mddev->curr_resync_completed;
+
+	}	
+	if (mddev->external || !mddev->persistent) {
+		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+		wake_up(&mddev->sb_wait);
+		return;
+	}
+
 	spin_lock_irq(&mddev->write_lock);
 
+	mddev->utime = get_seconds();
+
 	set_bit(MD_CHANGE_PENDING, &mddev->flags);
 	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
 		force_change = 1;
@@ -2221,19 +2226,6 @@
 		MD_BUG();
 		mddev->events --;
 	}
-
-	/*
-	 * do not write anything to disk if using
-	 * nonpersistent superblocks
-	 */
-	if (!mddev->persistent) {
-		if (!mddev->external)
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
-		spin_unlock_irq(&mddev->write_lock);
-		wake_up(&mddev->sb_wait);
-		return;
-	}
 	sync_sbs(mddev, nospares);
 	spin_unlock_irq(&mddev->write_lock);
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 73cc74f..ad83a4d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = (bio->bi_rw & REQ_SYNC);
-	bool do_barriers;
+	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+	unsigned long do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
 	/*
@@ -1120,6 +1120,8 @@
 {
 	int i;
 	conf_t *conf = mddev->private;
+	int count = 0;
+	unsigned long flags;
 
 	/*
 	 * Find all failed disks within the RAID1 configuration 
@@ -1131,15 +1133,16 @@
 		if (rdev
 		    && !test_bit(Faulty, &rdev->flags)
 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
-			unsigned long flags;
-			spin_lock_irqsave(&conf->device_lock, flags);
-			mddev->degraded--;
-			spin_unlock_irqrestore(&conf->device_lock, flags);
+			count++;
+			sysfs_notify_dirent(rdev->sysfs_state);
 		}
 	}
+	spin_lock_irqsave(&conf->device_lock, flags);
+	mddev->degraded -= count;
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 
 	print_conf(conf);
-	return 0;
+	return count;
 }
 
 
@@ -1640,7 +1643,7 @@
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
+			const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+				const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a88aeb5..8471838 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,7 +799,7 @@
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = (bio->bi_rw & REQ_SYNC);
+	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@
 	int i;
 	conf_t *conf = mddev->private;
 	mirror_info_t *tmp;
+	int count = 0;
+	unsigned long flags;
 
 	/*
 	 * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@
 		if (tmp->rdev
 		    && !test_bit(Faulty, &tmp->rdev->flags)
 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-			unsigned long flags;
-			spin_lock_irqsave(&conf->device_lock, flags);
-			mddev->degraded--;
-			spin_unlock_irqrestore(&conf->device_lock, flags);
+			count++;
+			sysfs_notify_dirent(tmp->rdev->sysfs_state);
 		}
 	}
+	spin_lock_irqsave(&conf->device_lock, flags);
+	mddev->degraded -= count;
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 
 	print_conf(conf);
-	return 0;
+	return count;
 }
 
 
@@ -1734,7 +1737,7 @@
 				raid_end_bio_io(r10_bio);
 				bio_put(bio);
 			} else {
-				const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+				const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 				bio_put(bio);
 				rdev = conf->mirrors[mirror].rdev;
 				if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 866d4b5..69b0a16 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5330,6 +5330,8 @@
 	int i;
 	raid5_conf_t *conf = mddev->private;
 	struct disk_info *tmp;
+	int count = 0;
+	unsigned long flags;
 
 	for (i = 0; i < conf->raid_disks; i++) {
 		tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@
 		    && tmp->rdev->recovery_offset == MaxSector
 		    && !test_bit(Faulty, &tmp->rdev->flags)
 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-			unsigned long flags;
-			spin_lock_irqsave(&conf->device_lock, flags);
-			mddev->degraded--;
-			spin_unlock_irqrestore(&conf->device_lock, flags);
+			count++;
+			sysfs_notify_dirent(tmp->rdev->sysfs_state);
 		}
 	}
+	spin_lock_irqsave(&conf->device_lock, flags);
+	mddev->degraded -= count;
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	print_raid5_conf(conf);
-	return 0;
+	return count;
 }
 
 static int raid5_remove_disk(mddev_t *mddev, int number)
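
The raid1, raid10 and raid5 hunks above all switch from decrementing mddev->degraded under the device lock once per device to counting newly in-sync devices first and applying the total in a single locked section, and the spares_active routines now return that count. A minimal pthread sketch of the same accumulate-then-apply-once pattern (names are illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static int degraded = 3;

    struct rdev { int faulty; int in_sync; };

    static int spares_active(struct rdev *devs, int n)
    {
    	int i, count = 0;

    	for (i = 0; i < n; i++) {
    		if (!devs[i].faulty && !devs[i].in_sync) {
    			devs[i].in_sync = 1;
    			count++;		/* just count here ... */
    		}
    	}

    	pthread_mutex_lock(&device_lock);	/* ... apply once under the lock */
    	degraded -= count;
    	pthread_mutex_unlock(&device_lock);

    	return count;
    }

    int main(void)
    {
    	struct rdev devs[3] = { {0, 0}, {1, 0}, {0, 0} };

    	printf("activated %d, degraded now %d\n",
    	       spares_active(devs, 3), degraded);
    	return 0;
    }
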
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0efe631..d80cfdc 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -86,7 +86,9 @@
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+#ifdef CONFIG_PM
 	host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
 
 	/*
 	 * By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 283190b..68d1279 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,7 +132,7 @@
 
 config MMC_SDHCI_S3C
 	tristate "SDHCI support on Samsung S3C SoC"
-	depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+	depends on MMC_SDHCI && PLAT_SAMSUNG
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
 	  often referred to as the HSMMC block in some of the Samsung S3C
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0a7f261..71ad416 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -242,7 +242,7 @@
 {
 	struct sdhci_host *host = platform_get_drvdata(dev);
 	if (host) {
-		mutex_lock(&host->lock);
+		spin_lock(&host->lock);
 		if (state) {
 			dev_dbg(&dev->dev, "card inserted.\n");
 			host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@
 			host->flags |= SDHCI_DEVICE_DEAD;
 			host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 		}
-		sdhci_card_detect(host);
-		mutex_unlock(&host->lock);
+		tasklet_schedule(&host->card_tasklet);
+		spin_unlock(&host->lock);
 	}
 }
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7855121..401527d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1180,7 +1180,8 @@
 	else
 		ctrl &= ~SDHCI_CTRL_4BITBUS;
 
-	if (ios->timing == MMC_TIMING_SD_HS)
+	if (ios->timing == MMC_TIMING_SD_HS &&
+	    !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
 		ctrl |= SDHCI_CTRL_HISPD;
 	else
 		ctrl &= ~SDHCI_CTRL_HISPD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 036cfae..d316bc7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -245,6 +245,8 @@
 #define SDHCI_QUIRK_MISSING_CAPS			(1<<27)
 /* Controller uses Auto CMD12 command to stop the transfer */
 #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12		(1<<28)
+/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
+#define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
 
 	int			irq;		/* Device IRQ */
 	void __iomem *		ioaddr;		/* Mapped address */
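
SDHCI_QUIRK_NO_HISPD_BIT above claims the next free bit (1<<29) in the host quirks bitfield, and sdhci.c then tests it with a bitwise AND before setting the high-speed control bit. A tiny sketch of that flag-bit convention (hypothetical quirk names and control value):

    #include <stdio.h>

    #define QUIRK_BROKEN_CARD_DETECTION	(1u << 0)
    #define QUIRK_NO_HISPD_BIT		(1u << 1)

    int main(void)
    {
    	unsigned int quirks = QUIRK_NO_HISPD_BIT;
    	unsigned int ctrl = 0;

    	/* only set the high-speed bit when the controller actually has one */
    	if (!(quirks & QUIRK_NO_HISPD_BIT))
    		ctrl |= 0x04;	/* hypothetical HISPD control bit */

    	printf("ctrl = %#x\n", ctrl);
    	return 0;
    }
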
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 00af55d..fe63f6b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -22,6 +22,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/concat.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a3c7473..d551ddd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2866,6 +2866,7 @@
 		 */
 		if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
 				id_data[0] == NAND_MFR_SAMSUNG &&
+				(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
 				id_data[5] != 0x00) {
 			/* Calc pagesize */
 			mtd->writesize = 2048 << (extid & 0x03);
@@ -2934,14 +2935,10 @@
 		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
 
 	/* Set the bad block position */
-	if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO ||
-				(*maf_id == NAND_MFR_SAMSUNG &&
-				 mtd->writesize == 512) ||
-				*maf_id == NAND_MFR_AMD))
-		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-	else
+	if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
-
+	else
+		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
 
 	/* Get chip options, preserve non chip based options */
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e02fa4f..4d89f37 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@
 #define tAR_NDTR1(r)	(((r) >> 0) & 0xf)
 
 /* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
+#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
 
 /* convert nand flash controller clock cycles to nano-seconds */
 #define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
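
The ns2cycle() change above drops the "- 1", which rounded the required cycle count down and could program timings one cycle too short. With a hypothetical 156 MHz controller clock and a 20 ns requirement (both values invented for illustration):

    #include <stdio.h>

    #define ns2cycle_old(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
    #define ns2cycle_new(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

    int main(void)
    {
    	long clk = 156000000;	/* hypothetical 156 MHz NAND controller clock */
    	int ns = 20;		/* hypothetical 20 ns timing requirement */

    	/* old: 20 * 156 / 1000 - 1 = 2 cycles; new: 3 cycles */
    	printf("old = %d cycles, new = %d cycles\n",
    	       ns2cycle_old(ns, clk), ns2cycle_new(ns, clk));
    	return 0;
    }
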
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6a..8ed30fa 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
 #define USB_PRODUCT_IPHONE      0x1290
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
+#define USB_PRODUCT_IPHONE_4	0x1297
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@
 		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0d5de25..373dcfe 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -48,6 +48,7 @@
 #include <linux/netdevice.h>
 #include <linux/cache.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/ethtool.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
@@ -476,6 +477,26 @@
 	int ret;
 	u8 csz;
 
+	/*
+	 * L0s needs to be disabled on all ath5k cards.
+	 *
+	 * For distributions shipping with CONFIG_PCIEASPM (which will be
+	 * enabled by default starting with 2.6.36) this also means both L1
+	 * and L0s will be disabled when a pre 1.1 PCIe device is detected.
+	 * We know L1 works correctly even on all ath5k pre 1.1 PCIe devices,
+	 * but we cannot currently undo the effect of a blacklist; for the
+	 * details read pcie_aspm_sanity_check() and see how it adjusts the
+	 * device link capability.
+	 *
+	 * It may be possible in the future to add a PCI API that lets
+	 * drivers override the blacklist for pre 1.1 PCIe devices, but for
+	 * now it is best to accept that both L0s and L1 will be disabled
+	 * completely for distributions shipping with CONFIG_PCIEASPM rather
+	 * than leave this issue present.  The motivation for such an API
+	 * would be to help with power consumption on some of these devices.
+	 */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "can't enable device\n");
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 8750c55..7f48df1 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -191,6 +191,7 @@
 #define AR9287_EEP_NO_BACK_VER       AR9287_EEP_MINOR_VER_1
 
 #define AR9287_EEP_START_LOC            128
+#define AR9287_HTC_EEP_START_LOC        256
 #define AR9287_NUM_2G_CAL_PIERS         3
 #define AR9287_NUM_2G_CCK_TARGET_POWERS 3
 #define AR9287_NUM_2G_20_TARGET_POWERS  3
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4a52cf0..dff2da7 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -34,9 +34,14 @@
 	struct ar9287_eeprom *eep = &ah->eeprom.map9287;
 	struct ath_common *common = ath9k_hw_common(ah);
 	u16 *eep_data;
-	int addr, eep_start_loc = AR9287_EEP_START_LOC;
+	int addr, eep_start_loc;
 	eep_data = (u16 *)eep;
 
+	if (ah->hw_version.devid == 0x7015)
+		eep_start_loc = AR9287_HTC_EEP_START_LOC;
+	else
+		eep_start_loc = AR9287_EEP_START_LOC;
+
 	if (!ath9k_hw_use_flash(ah)) {
 		ath_print(common, ATH_DBG_EEPROM,
 			  "Reading from EEPROM, not flash\n");
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 61c1bee..17e7a9a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -799,7 +799,7 @@
 	}
 	kfree(buf);
 
-	if (hif_dev->device_id == 0x7010)
+	if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
 		firm_offset = AR7010_FIRMWARE_TEXT;
 	else
 		firm_offset = AR9271_FIRMWARE_TEXT;
@@ -901,6 +901,7 @@
 
 	switch(hif_dev->device_id) {
 	case 0x7010:
+	case 0x7015:
 	case 0x9018:
 		if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
 			hif_dev->fw_name = FIRMWARE_AR7010_1_1;
@@ -912,11 +913,6 @@
 		break;
 	}
 
-	if (!hif_dev->fw_name) {
-		dev_err(&udev->dev, "Can't determine firmware !\n");
-		goto err_htc_hw_alloc;
-	}
-
 	ret = ath9k_hif_usb_dev_init(hif_dev);
 	if (ret) {
 		ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 148b433..2d42791 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -245,6 +245,7 @@
 
 	switch(devid) {
 	case 0x7010:
+	case 0x7015:
 	case 0x9018:
 		priv->htc->credits = 45;
 		break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ebed9d1..7d09b4b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -366,7 +366,8 @@
 		caps = WLAN_RC_HT_FLAG;
 		if (sta->ht_cap.mcs.rx_mask[1])
 			caps |= WLAN_RC_DS_FLAG;
-		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+		if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
+		     (conf_is_ht40(&priv->hw->conf)))
 			caps |= WLAN_RC_40_FLAG;
 		if (conf_is_ht40(&priv->hw->conf) &&
 		    (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index bd0b4ac..2a6e45a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -78,18 +78,23 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_sta *sta = tx_info->control.sta;
 	struct ath9k_htc_sta *ista;
-	struct ath9k_htc_vif *avp;
 	struct ath9k_htc_tx_ctl tx_ctl;
 	enum htc_endpoint_id epid;
 	u16 qnum;
 	__le16 fc;
 	u8 *tx_fhdr;
-	u8 sta_idx;
+	u8 sta_idx, vif_idx;
 
 	hdr = (struct ieee80211_hdr *) skb->data;
 	fc = hdr->frame_control;
 
-	avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
+	if (tx_info->control.vif &&
+			(struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
+		vif_idx = ((struct ath9k_htc_vif *)
+				tx_info->control.vif->drv_priv)->index;
+	else
+		vif_idx = priv->nvifs;
+
 	if (sta) {
 		ista = (struct ath9k_htc_sta *) sta->drv_priv;
 		sta_idx = ista->index;
@@ -106,7 +111,7 @@
 		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
 
 		tx_hdr.node_idx = sta_idx;
-		tx_hdr.vif_idx = avp->index;
+		tx_hdr.vif_idx = vif_idx;
 
 		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 			tx_ctl.type = ATH9K_HTC_AMPDU;
@@ -169,7 +174,7 @@
 		tx_ctl.type = ATH9K_HTC_NORMAL;
 
 		mgmt_hdr.node_idx = sta_idx;
-		mgmt_hdr.vif_idx = avp->index;
+		mgmt_hdr.vif_idx = vif_idx;
 		mgmt_hdr.tidno = 0;
 		mgmt_hdr.flags = 0;
 
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 633e3d9..d01c4ad 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -899,6 +899,7 @@
 
 #define AR_DEVID_7010(_ah) \
 	(((_ah)->hw_version.devid == 0x7010) || \
+	 ((_ah)->hw_version.devid == 0x7015) || \
 	 ((_ah)->hw_version.devid == 0x9018))
 
 #define AR_RADIO_SREV_MAJOR                   0xf0
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 16bbfa3..1189dbb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6665,12 +6665,13 @@
 	printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
 	printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
 
+	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
 	ret = pci_register_driver(&ipw2100_pci_driver);
 	if (ret)
 		goto out;
 
-	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-			   PM_QOS_DEFAULT_VALUE);
 #ifdef CONFIG_IPW2100_DEBUG
 	ipw2100_debug_level = debug;
 	ret = driver_create_file(&ipw2100_pci_driver.driver,
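
The ipw2100 hunk above moves pm_qos_add_request() ahead of pci_register_driver(), so the latency request already exists by the time the PCI probe path runs and uses it. A generic sketch of the ordering rule, register a callback only after everything it touches is initialized (made-up names, not the pm_qos API):

    #include <stdio.h>

    static int latency_request_ready;

    /* the "probe" callback may run as soon as the driver is registered */
    static int probe(void)
    {
    	if (!latency_request_ready)
    		return -1;	/* would have used an unready resource */
    	puts("probe ran with the latency request in place");
    	return 0;
    }

    static int register_driver(int (*probe_fn)(void))
    {
    	return probe_fn();	/* stand-in for the core calling probe immediately */
    }

    int main(void)
    {
    	latency_request_ready = 1;		/* set up first ... */
    	return register_driver(probe);		/* ... then register */
    }
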
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index a37b30c..ce3722f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -484,7 +484,7 @@
 
 	cmd->timeout = timeout;
 
-	ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+	ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
 	if (ret < 0) {
 		wl1251_error("cmd trigger scan to failed: %d", ret);
 		goto out;
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index e058c2b..ca05aef 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -938,10 +938,11 @@
 	/* SPLV laptop */
 	if (hotk->methods->brightness_set) {
 		if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
-				    value, NULL))
+				    value, NULL)) {
 			printk(KERN_WARNING
 			       "Asus ACPI: Error changing brightness\n");
 			ret = -EIO;
+		}
 		goto out;
 	}
 
@@ -953,10 +954,11 @@
 					      hotk->methods->brightness_down,
 					      NULL, NULL);
 		(value > 0) ? value-- : value++;
-		if (ACPI_FAILURE(status))
+		if (ACPI_FAILURE(status)) {
 			printk(KERN_WARNING
 			       "Asus ACPI: Error changing brightness\n");
 			ret = -EIO;
+		}
 	}
 out:
 	return ret;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index d071ce0..097083c 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -841,6 +841,14 @@
 		.callback = dmi_check_cb
 	},
 	{
+		.ident = "Dell Mini 1012",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+		},
+		.callback = dmi_check_cb
+	},
+	{
 		.ident = "Dell Inspiron 11z",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1092,5 +1100,6 @@
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
+MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b41ed5c..4413975 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -122,6 +122,13 @@
 		},
 	},
 	{
+		.ident = "Dell Mini 1012",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+		},
+	},
+	{
 		.ident = "Dell Inspiron 11z",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index afe82e5..9024480a 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1342,8 +1342,10 @@
 		limits = &ips_lv_limits;
 	else if (strstr(boot_cpu_data.x86_model_id, "CPU       U"))
 		limits = &ips_ulv_limits;
-	else
+	else {
 		dev_info(&ips->dev->dev, "No CPUID match found.\n");
+		goto out;
+	}
 
 	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
 	tdp = turbo_power & TURBO_TDP_MASK;
@@ -1432,6 +1434,12 @@
 
 	spin_lock_init(&ips->turbo_status_lock);
 
+	ret = pci_enable_device(dev);
+	if (ret) {
+		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
+		goto error_free;
+	}
+
 	if (!pci_resource_start(dev, 0)) {
 		dev_err(&dev->dev, "TBAR not assigned, aborting\n");
 		ret = -ENXIO;
@@ -1444,11 +1452,6 @@
 		goto error_free;
 	}
 
-	ret = pci_enable_device(dev);
-	if (ret) {
-		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
-		goto error_free;
-	}
 
 	ips->regmap = ioremap(pci_resource_start(dev, 0),
 			      pci_resource_len(dev, 0));
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5d6119b..e35ed12 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1911,6 +1911,17 @@
 	TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
 	TP_ACPI_HOTKEYSCAN_MUTE,
 	TP_ACPI_HOTKEYSCAN_THINKPAD,
+	TP_ACPI_HOTKEYSCAN_UNK1,
+	TP_ACPI_HOTKEYSCAN_UNK2,
+	TP_ACPI_HOTKEYSCAN_UNK3,
+	TP_ACPI_HOTKEYSCAN_UNK4,
+	TP_ACPI_HOTKEYSCAN_UNK5,
+	TP_ACPI_HOTKEYSCAN_UNK6,
+	TP_ACPI_HOTKEYSCAN_UNK7,
+	TP_ACPI_HOTKEYSCAN_UNK8,
+
+	/* Hotkey keymap size */
+	TPACPI_HOTKEY_MAP_LEN
 };
 
 enum {	/* Keys/events available through NVRAM polling */
@@ -3082,6 +3093,8 @@
 	TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
 };
 
+typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
 	/* Requirements for changing the default keymaps:
@@ -3113,9 +3126,17 @@
 	 * If the above is too much to ask, don't change the keymap.
 	 * Ask the thinkpad-acpi maintainer to do it, instead.
 	 */
-	static u16 ibm_keycode_map[] __initdata = {
+
+	enum keymap_index {
+		TPACPI_KEYMAP_IBM_GENERIC = 0,
+		TPACPI_KEYMAP_LENOVO_GENERIC,
+	};
+
+	static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
+	/* Generic keymap for IBM ThinkPads */
+	[TPACPI_KEYMAP_IBM_GENERIC] = {
 		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
-		KEY_FN_F1,	KEY_FN_F2,	KEY_COFFEE,	KEY_SLEEP,
+		KEY_FN_F1,	KEY_BATTERY,	KEY_COFFEE,	KEY_SLEEP,
 		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
 		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
 
@@ -3146,11 +3167,13 @@
 		/* (assignments unknown, please report if found) */
 		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
 		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-	};
-	static u16 lenovo_keycode_map[] __initdata = {
+		},
+
+	/* Generic keymap for Lenovo ThinkPads */
+	[TPACPI_KEYMAP_LENOVO_GENERIC] = {
 		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
 		KEY_FN_F1,	KEY_COFFEE,	KEY_BATTERY,	KEY_SLEEP,
-		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+		KEY_WLAN,	KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
 		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
 
 		/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
@@ -3189,11 +3212,25 @@
 		/* (assignments unknown, please report if found) */
 		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
 		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+		},
 	};
 
-#define TPACPI_HOTKEY_MAP_LEN		ARRAY_SIZE(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_SIZE		sizeof(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_TYPESIZE	sizeof(ibm_keycode_map[0])
+	static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
+		/* Generic maps (fallback) */
+		{
+		  .vendor = PCI_VENDOR_ID_IBM,
+		  .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+		  .quirks = TPACPI_KEYMAP_IBM_GENERIC,
+		},
+		{
+		  .vendor = PCI_VENDOR_ID_LENOVO,
+		  .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+		  .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,
+		},
+	};
+
+#define TPACPI_HOTKEY_MAP_SIZE		sizeof(tpacpi_keymap_t)
+#define TPACPI_HOTKEY_MAP_TYPESIZE	sizeof(tpacpi_keymap_t[0])
 
 	int res, i;
 	int status;
@@ -3202,6 +3239,7 @@
 	bool tabletsw_state = false;
 
 	unsigned long quirks;
+	unsigned long keymap_id;
 
 	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
 			"initializing hotkey subdriver\n");
@@ -3342,7 +3380,6 @@
 		goto err_exit;
 
 	/* Set up key map */
-
 	hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
 					GFP_KERNEL);
 	if (!hotkey_keycode_map) {
@@ -3352,17 +3389,14 @@
 		goto err_exit;
 	}
 
-	if (tpacpi_is_lenovo()) {
-		dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-			   "using Lenovo default hot key map\n");
-		memcpy(hotkey_keycode_map, &lenovo_keycode_map,
-			TPACPI_HOTKEY_MAP_SIZE);
-	} else {
-		dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-			   "using IBM default hot key map\n");
-		memcpy(hotkey_keycode_map, &ibm_keycode_map,
-			TPACPI_HOTKEY_MAP_SIZE);
-	}
+	keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
+					ARRAY_SIZE(tpacpi_keymap_qtable));
+	BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
+	dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+		   "using keymap number %lu\n", keymap_id);
+
+	memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id],
+		TPACPI_HOTKEY_MAP_SIZE);
 
 	input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
 	tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
@@ -3469,7 +3503,8 @@
 	*send_acpi_ev = true;
 	*ignore_acpi_ev = false;
 
-	if (scancode > 0 && scancode < 0x21) {
+	/* HKEY event 0x1001 is scancode 0x00 */
+	if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) {
 		scancode--;
 		if (!(hotkey_source_mask & (1 << scancode))) {
 			tpacpi_input_send_key_masked(scancode);
@@ -6080,13 +6115,18 @@
 
 /* --------------------------------------------------------------------- */
 
+/*
+ * Call _BCL method of video device.  On some ThinkPads this will
+ * switch the firmware to the ACPI brightness control mode.
+ */
+
 static int __init tpacpi_query_bcl_levels(acpi_handle handle)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	int rc;
 
-	if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+	if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
 		obj = (union acpi_object *)buffer.pointer;
 		if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
 			printk(TPACPI_ERR "Unknown _BCL data, "
@@ -6103,55 +6143,22 @@
 	return rc;
 }
 
-static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
-					u32 lvl, void *context, void **rv)
-{
-	char name[ACPI_PATH_SEGMENT_LENGTH];
-	struct acpi_buffer buffer = { sizeof(name), &name };
-
-	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
-	    !strncmp("_BCL", name, sizeof(name) - 1)) {
-		BUG_ON(!rv || !*rv);
-		**(int **)rv = tpacpi_query_bcl_levels(handle);
-		return AE_CTRL_TERMINATE;
-	} else {
-		return AE_OK;
-	}
-}
 
 /*
  * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
  */
 static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
 {
-	int status;
+	acpi_handle video_device;
 	int bcl_levels = 0;
-	void *bcl_ptr = &bcl_levels;
 
-	if (!vid_handle)
-		TPACPI_ACPIHANDLE_INIT(vid);
+	tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+	if (video_device)
+		bcl_levels = tpacpi_query_bcl_levels(video_device);
 
-	if (!vid_handle)
-		return 0;
+	tp_features.bright_acpimode = (bcl_levels > 0);
 
-	/*
-	 * Search for a _BCL method, and execute it.  This is safe on all
-	 * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
-	 * BIOS in ACPI backlight control mode.  We do NOT have to care
-	 * about calling the _BCL method in an enabled video device, any
-	 * will do for our purposes.
-	 */
-
-	status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
-				     tpacpi_acpi_walk_find_bcl, NULL, NULL,
-				     &bcl_ptr);
-
-	if (ACPI_SUCCESS(status) && bcl_levels > 2) {
-		tp_features.bright_acpimode = 1;
-		return bcl_levels - 2;
-	}
-
-	return 0;
+	return (bcl_levels > 2) ? (bcl_levels - 2) : 0;
 }
 
 /*
@@ -6244,28 +6251,6 @@
 	if (tp_features.bright_unkfw)
 		return 1;
 
-	if (tp_features.bright_acpimode) {
-		if (acpi_video_backlight_support()) {
-			if (brightness_enable > 1) {
-				printk(TPACPI_NOTICE
-				       "Standard ACPI backlight interface "
-				       "available, not loading native one.\n");
-				return 1;
-			} else if (brightness_enable == 1) {
-				printk(TPACPI_NOTICE
-				       "Backlight control force enabled, even if standard "
-				       "ACPI backlight interface is available\n");
-			}
-		} else {
-			if (brightness_enable > 1) {
-				printk(TPACPI_NOTICE
-				       "Standard ACPI backlight interface not "
-				       "available, thinkpad_acpi native "
-				       "brightness control enabled\n");
-			}
-		}
-	}
-
 	if (!brightness_enable) {
 		dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
 			   "brightness support disabled by "
@@ -6273,6 +6258,26 @@
 		return 1;
 	}
 
+	if (acpi_video_backlight_support()) {
+		if (brightness_enable > 1) {
+			printk(TPACPI_INFO
+			       "Standard ACPI backlight interface "
+			       "available, not loading native one.\n");
+			return 1;
+		} else if (brightness_enable == 1) {
+			printk(TPACPI_WARN
+				"Cannot enable backlight brightness support, "
+				"ACPI is already handling it.  Refer to the "
+				"acpi_backlight kernel parameter\n");
+			return 1;
+		}
+	} else if (tp_features.bright_acpimode && brightness_enable > 1) {
+		printk(TPACPI_NOTICE
+			"Standard ACPI backlight interface not "
+			"available, thinkpad_acpi native "
+			"brightness control enabled\n");
+	}
+
 	/*
 	 * Check for module parameter bogosity, note that we
 	 * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
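
The thinkpad-acpi hunks above replace the two hard-coded IBM/Lenovo keycode arrays with an indexed table of keymaps (tpacpi_keymaps[]) plus a quirk table (tpacpi_keymap_qtable[]), so the driver picks the map by a table lookup instead of an if/else on the vendor. A minimal sketch of that selection pattern; keymap_table and pick_keymap are hypothetical stand-ins for tpacpi_keymaps and the driver's tpacpi_check_quirks() machinery:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/input.h>
	#include <linux/pci_ids.h>

	enum { KEYMAP_IBM = 0, KEYMAP_LENOVO };

	static const u16 keymap_table[][2] = {		/* two keys per map, for brevity */
		[KEYMAP_IBM]	= { KEY_FN_F1, KEY_BATTERY },
		[KEYMAP_LENOVO]	= { KEY_FN_F1, KEY_COFFEE },
	};

	/* hypothetical stand-in for tpacpi_check_quirks(): vendor match, IBM fallback */
	static unsigned int pick_keymap(u16 vendor)
	{
		return (vendor == PCI_VENDOR_ID_LENOVO) ? KEYMAP_LENOVO : KEYMAP_IBM;
	}

	static void load_keymap(u16 *dst, u16 vendor)
	{
		memcpy(dst, keymap_table[pick_keymap(vendor)], sizeof(keymap_table[0]));
	}

The real driver sizes the copy with TPACPI_HOTKEY_MAP_SIZE and BUG()s out if the quirk table ever yields an index outside tpacpi_keymaps, as the hunk above shows.
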
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 95a895d..c8dc392 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -56,6 +56,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/timer.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
 #include <asm/dma.h>
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index f065204..95a26fb 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -132,7 +132,7 @@
 int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
-inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e031a73..5d4a382 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1418,7 +1418,7 @@
 	return QLA_SUCCESS;
 }
 
-inline void
+void
 qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
 {
 	uint32_t drv_active;
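
The two qla4xxx hunks above drop `inline` from qla4_8xxx_set_drv_active(): the prototype in ql4_glbl.h was marked inline while the only definition is out of line in ql4_nx.c, a combination that other translation units cannot actually inline and that draws compiler/sparse warnings (and, under strict C99 inline rules, can leave callers without a definition to link against). A small illustration of the two safe patterns, using a hypothetical foo_host type:

	struct foo_host { int active; };

	/* pattern 1: plain external function; prototype in the header,
	 * exactly one definition in some .c file */
	void foo_set_active(struct foo_host *ha);

	/* pattern 2: static inline with the body in the header itself,
	 * so every caller sees the definition */
	static inline void foo_clear_active(struct foo_host *ha)
	{
		ha->active = 0;
	}

The patch removes the mix of the two: an inline-marked prototype whose body is compiled in a different file.
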
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 659a695..2af8fd1 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,11 +14,10 @@
 #include <linux/slab.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/nwpserial.h>
 
-#include <asm/prom.h>
-
 struct of_serial_info {
 	int type;
 	int line;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 544f2e2..6381a02 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -55,7 +55,12 @@
 int sunserial_console_match(struct console *con, struct device_node *dp,
 			    struct uart_driver *drv, int line, bool ignore_line)
 {
-	if (!con || of_console_device != dp)
+	if (!con)
+		return 0;
+
+	drv->cons = con;
+
+	if (of_console_device != dp)
 		return 0;
 
 	if (!ignore_line) {
@@ -69,12 +74,10 @@
 			return 0;
 	}
 
-	con->index = line;
-	drv->cons = con;
-
-	if (!console_set_on_cmdline)
+	if (!console_set_on_cmdline) {
+		con->index = line;
 		add_preferred_console(con->name, line, NULL);
-
+	}
 	return 1;
 }
 EXPORT_SYMBOL(sunserial_console_match);
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 59be3ef..052b3c7 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/io.h>
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index cdc4dd5..8ec83d2 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -44,9 +44,9 @@
 		return -ENOENT;
 	}
 
-	read_lock(&current->fs->lock);
+	spin_lock(&current->fs->lock);
 	path.mnt = mntget(current->fs->root.mnt);
-	read_unlock(&current->fs->lock);
+	spin_unlock(&current->fs->lock);
 
 	path.dentry = d;
 
@@ -91,9 +91,9 @@
 		return -ENOENT;
 	}
 
-	read_lock(&current->fs->lock);
+	spin_lock(&current->fs->lock);
 	root = dget(current->fs->root.dentry);
-	read_unlock(&current->fs->lock);
+	spin_unlock(&current->fs->lock);
 
 	spin_lock(&dcache_lock);
 
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index afe21e6..1c2c683 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -80,7 +80,10 @@
 	/*
 	 * Disable CLCD clock source.
 	 */
-	clk_disable(fb->clk);
+	if (fb->clk_enabled) {
+		fb->clk_enabled = false;
+		clk_disable(fb->clk);
+	}
 }
 
 static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
@@ -88,7 +91,10 @@
 	/*
 	 * Enable the CLCD clock source.
 	 */
-	clk_enable(fb->clk);
+	if (!fb->clk_enabled) {
+		fb->clk_enabled = true;
+		clk_enable(fb->clk);
+	}
 
 	/*
 	 * Bring up by first enabling..
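
The amba-clcd hunks above wrap clk_enable()/clk_disable() in a clk_enabled flag so the calls stay balanced even if the enable or disable path runs twice in a row; the common clock code keeps a per-clock enable count, and an unbalanced clk_disable() underflows it. A minimal sketch of the guard pattern, assuming a driver struct that carries a bool clk_enabled next to its struct clk (as clcd_fb must after this change):

	#include <linux/clk.h>
	#include <linux/types.h>

	struct my_fb {
		struct clk *clk;
		bool clk_enabled;
	};

	static void my_fb_clk_on(struct my_fb *fb)
	{
		if (!fb->clk_enabled) {
			fb->clk_enabled = true;
			clk_enable(fb->clk);	/* at most one enable per device */
		}
	}

	static void my_fb_clk_off(struct my_fb *fb)
	{
		if (fb->clk_enabled) {
			fb->clk_enabled = false;
			clk_disable(fb->clk);	/* never called without a prior enable */
		}
	}
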
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index f3a4e15..f96a471 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -151,13 +151,13 @@
 static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
 #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
 	/*
-	 * memcpy_toio works for us if:
+	 * iowrite32_rep works for us if:
 	 *  (1) Copies data as 32bit quantities, not byte after byte,
 	 *  (2) Performs LE ordered stores, and
 	 *  (3) It copes with unaligned source (destination is guaranteed to be page
 	 *      aligned and length is guaranteed to be multiple of 4).
 	 */
-	memcpy_toio(va.vaddr, src, len);
+	iowrite32_rep(va.vaddr, src, len >> 2);
 #else
         u_int32_t __iomem* addr = va.vaddr;
 
diff --git a/firmware/Makefile b/firmware/Makefile
index b27f09f..9c2d194 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -142,7 +142,7 @@
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
 # Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
 
 quiet_cmd_mkdir = MKDIR   $(patsubst $(objtree)/%,%,$@)
       cmd_mkdir = mkdir -p $@
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 9e60fd2..a7528b9 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -108,7 +108,7 @@
 	Node *fmt;
 	struct file * interp_file = NULL;
 	char iname[BINPRM_BUF_SIZE];
-	char *iname_addr = iname;
+	const char *iname_addr = iname;
 	int retval;
 	int fd_binary = -1;
 
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index aca9d55a..396a988 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -16,7 +16,8 @@
 
 static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
 {
-	char *cp, *i_name, *i_arg;
+	const char *i_arg, *i_name;
+	char *cp;
 	struct file *file;
 	char interp[BINPRM_BUF_SIZE];
 	int retval;
diff --git a/fs/buffer.c b/fs/buffer.c
index 50efa33..3e7dca2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -770,11 +770,12 @@
 				spin_unlock(lock);
 				/*
 				 * Ensure any pending I/O completes so that
-				 * ll_rw_block() actually writes the current
-				 * contents - it is a noop if I/O is still in
-				 * flight on potentially older contents.
+				 * write_dirty_buffer() actually writes the
+				 * current contents - it is a noop if I/O is
+				 * still in flight on potentially older
+				 * contents.
 				 */
-				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -2912,13 +2913,6 @@
 	BUG_ON(buffer_unwritten(bh));
 
 	/*
-	 * Mask in barrier bit for a write (could be either a WRITE or a
-	 * WRITE_SYNC
-	 */
-	if (buffer_ordered(bh) && (rw & WRITE))
-		rw |= WRITE_BARRIER;
-
-	/*
 	 * Only clear out a write error when rewriting
 	 */
 	if (test_set_buffer_req(bh) && (rw & WRITE))
@@ -2956,22 +2950,21 @@
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request.  Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request.  Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
-			lock_buffer(bh);
-		else if (!trylock_buffer(bh))
+		if (!trylock_buffer(bh))
 			continue;
-
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-		    rw == SWRITE_SYNC_PLUG) {
+		if (rw == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				if (rw == SWRITE_SYNC)
-					submit_bh(WRITE_SYNC, bh);
-				else
-					submit_bh(WRITE, bh);
+				submit_bh(WRITE, bh);
 				continue;
 			}
 		} else {
@@ -3016,12 +3002,25 @@
 }
 EXPORT_SYMBOL(ll_rw_block);
 
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+	lock_buffer(bh);
+	if (!test_clear_buffer_dirty(bh)) {
+		unlock_buffer(bh);
+		return;
+	}
+	bh->b_end_io = end_buffer_write_sync;
+	get_bh(bh);
+	submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
+
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
  * and then start new I/O and then wait upon it.  The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
 	int ret = 0;
 
@@ -3030,7 +3029,7 @@
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(rw, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3042,12 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+	return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
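
The fs/buffer.c changes above retire the SWRITE* modes of ll_rw_block() and introduce write_dirty_buffer() (lock the buffer, submit it only if it was dirty) plus __sync_dirty_buffer() (the same, but waits and lets the caller pick the rw flags, with sync_dirty_buffer() now a thin WRITE_SYNC wrapper). Callers that batched writes through ll_rw_block(SWRITE, ...) now loop themselves, exactly as the fat and jbd hunks further down do. The conversion pattern as a small helper, assuming the new function is declared in <linux/buffer_head.h> next to sync_dirty_buffer():

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	static void write_and_wait(struct buffer_head **bhs, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			write_dirty_buffer(bhs[i], WRITE);	/* was: ll_rw_block(SWRITE, nr, bhs) */
		for (i = 0; i < nr; i++)
			wait_on_buffer(bhs[i]);			/* wait for the whole batch */
	}
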
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index a53b130..1e7a330 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -80,7 +80,7 @@
 		}
 	} else {
 		inode = iget_locked(sb, CRAMINO(cramfs_inode));
-		if (inode) {
+		if (inode && (inode->i_state & I_NEW)) {
 			setup_inode(inode, cramfs_inode);
 			unlock_new_inode(inode);
 		}
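
The one-line cramfs fix above hinges on the iget_locked() contract: it returns either a freshly allocated inode that is locked and flagged I_NEW, or an already initialized inode from the inode cache. Only the I_NEW case may be filled in, and it must then be released with unlock_new_inode(); the old code would re-run the setup and unlock_new_inode() on an inode that was never locked as new. The generic shape of the pattern, sketched for a hypothetical filesystem:

	#include <linux/fs.h>
	#include <linux/err.h>

	/* hypothetical helper: would read the on-disk inode and set i_mode, i_size, ops... */
	static void myfs_fill_inode(struct inode *inode)
	{
	}

	static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = iget_locked(sb, ino);

		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (inode->i_state & I_NEW) {		/* first time this inode is seen */
			myfs_fill_inode(inode);
			unlock_new_inode(inode);
		}
		return inode;				/* cached inodes come back ready to use */
	}
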
diff --git a/fs/dcache.c b/fs/dcache.c
index 4d13bf5..83293be 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1332,31 +1332,13 @@
  * d_lookup - search for a dentry
  * @parent: parent dentry
  * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
  *
- * Searches the children of the parent dentry for the name in question. If
- * the dentry is found its reference count is incremented and the dentry
- * is returned. The caller must use dput to free the entry when it has
- * finished using it. %NULL is returned on failure.
- *
- * __d_lookup is dcache_lock free. The hash list is protected using RCU.
- * Memory barriers are used while updating and doing lockless traversal. 
- * To avoid races with d_move while rename is happening, d_lock is used.
- *
- * Overflows in memcmp(), while d_move, are avoided by keeping the length
- * and name pointer in one structure pointed by d_qstr.
- *
- * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
- * lookup is going on.
- *
- * The dentry unused LRU is not updated even if lookup finds the required dentry
- * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
- * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
- * acquisition.
- *
- * d_lookup() is protected against the concurrent renames in some unrelated
- * directory using the seqlockt_t rename_lock.
+ * d_lookup searches the children of the parent dentry for the name in
+ * question. If the dentry is found its reference count is incremented and the
+ * dentry is returned. The caller must use dput to free the entry when it has
+ * finished using it. %NULL is returned if the dentry does not exist.
  */
-
 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 {
 	struct dentry * dentry = NULL;
@@ -1372,6 +1354,21 @@
 }
 EXPORT_SYMBOL(d_lookup);
 
+/*
+ * __d_lookup - search for a dentry (racy)
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ * Returns: dentry, or NULL
+ *
+ * __d_lookup is like d_lookup, however it may (rarely) return a
+ * false-negative result due to unrelated rename activity.
+ *
+ * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
+ * however it must be used carefully, eg. with a following d_lookup in
+ * the case of failure.
+ *
+ * __d_lookup callers must be commented.
+ */
 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 {
 	unsigned int len = name->len;
@@ -1382,6 +1379,19 @@
 	struct hlist_node *node;
 	struct dentry *dentry;
 
+	/*
+	 * The hash list is protected using RCU.
+	 *
+	 * Take d_lock when comparing a candidate dentry, to avoid races
+	 * with d_move().
+	 *
+	 * It is possible that concurrent renames can mess up our list
+	 * walk here and result in missing our dentry, resulting in the
+	 * false-negative result. d_lookup() protects against concurrent
+	 * renames using rename_lock seqlock.
+	 *
+	 * See Documentation/filesystems/dcache-locking.txt for more details.
+	 */
 	rcu_read_lock();
 	
 	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
@@ -1396,8 +1406,8 @@
 
 		/*
 		 * Recheck the dentry after taking the lock - d_move may have
-		 * changed things.  Don't bother checking the hash because we're
-		 * about to compare the whole name anyway.
+		 * changed things. Don't bother checking the hash because
+		 * we're about to compare the whole name anyway.
 		 */
 		if (dentry->d_parent != parent)
 			goto next;
@@ -1925,7 +1935,7 @@
 	bool slash = false;
 	int error = 0;
 
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
@@ -1954,7 +1964,7 @@
 	if (!error && !slash)
 		error = prepend(buffer, buflen, "/", 1);
 
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	return error;
 
 global_root:
@@ -2292,11 +2302,12 @@
 	struct vfsmount *mnt = path1->mnt;
 	struct dentry *dentry = path1->dentry;
 	int res;
-	spin_lock(&vfsmount_lock);
+
+	br_read_lock(vfsmount_lock);
 	if (mnt != path2->mnt) {
 		for (;;) {
 			if (mnt->mnt_parent == mnt) {
-				spin_unlock(&vfsmount_lock);
+				br_read_unlock(vfsmount_lock);
 				return 0;
 			}
 			if (mnt->mnt_parent == path2->mnt)
@@ -2306,7 +2317,7 @@
 		dentry = mnt->mnt_mountpoint;
 	}
 	res = is_subdir(dentry, path2->dentry);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
diff --git a/fs/exec.c b/fs/exec.c
index 7761837..2d94552 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -361,13 +361,13 @@
 /*
  * count() counts the number of strings in array ARGV.
  */
-static int count(char __user * __user * argv, int max)
+static int count(const char __user * const __user * argv, int max)
 {
 	int i = 0;
 
 	if (argv != NULL) {
 		for (;;) {
-			char __user * p;
+			const char __user * p;
 
 			if (get_user(p, argv))
 				return -EFAULT;
@@ -387,7 +387,7 @@
  * processes's memory to the new process's stack.  The call to get_user_pages()
  * ensures the destination page is created and not swapped out.
  */
-static int copy_strings(int argc, char __user * __user * argv,
+static int copy_strings(int argc, const char __user *const __user *argv,
 			struct linux_binprm *bprm)
 {
 	struct page *kmapped_page = NULL;
@@ -396,7 +396,7 @@
 	int ret;
 
 	while (argc-- > 0) {
-		char __user *str;
+		const char __user *str;
 		int len;
 		unsigned long pos;
 
@@ -470,12 +470,13 @@
 /*
  * Like copy_strings, but get argv and its values from kernel memory.
  */
-int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
+int copy_strings_kernel(int argc, const char *const *argv,
+			struct linux_binprm *bprm)
 {
 	int r;
 	mm_segment_t oldfs = get_fs();
 	set_fs(KERNEL_DS);
-	r = copy_strings(argc, (char __user * __user *)argv, bprm);
+	r = copy_strings(argc, (const char __user *const  __user *)argv, bprm);
 	set_fs(oldfs);
 	return r;
 }
@@ -997,7 +998,7 @@
 void setup_new_exec(struct linux_binprm * bprm)
 {
 	int i, ch;
-	char * name;
+	const char *name;
 	char tcomm[sizeof(current->comm)];
 
 	arch_pick_mmap_layout(current->mm);
@@ -1117,7 +1118,7 @@
 	bprm->unsafe = tracehook_unsafe_exec(p);
 
 	n_fs = 1;
-	write_lock(&p->fs->lock);
+	spin_lock(&p->fs->lock);
 	rcu_read_lock();
 	for (t = next_thread(p); t != p; t = next_thread(t)) {
 		if (t->fs == p->fs)
@@ -1134,7 +1135,7 @@
 			res = 1;
 		}
 	}
-	write_unlock(&p->fs->lock);
+	spin_unlock(&p->fs->lock);
 
 	return res;
 }
@@ -1316,9 +1317,9 @@
 /*
  * sys_execve() executes a new program.
  */
-int do_execve(char * filename,
-	char __user *__user *argv,
-	char __user *__user *envp,
+int do_execve(const char * filename,
+	const char __user *const __user *argv,
+	const char __user *const __user *envp,
 	struct pt_regs * regs)
 {
 	struct linux_binprm *bprm;
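
The fs/exec.c hunks above constify the argv/envp plumbing. Read inside out, `const char __user *const __user *argv` is a user-space pointer to const-qualified user-space pointers to const char: count() and copy_strings() may read the pointer array and the strings, but the types now promise they modify neither. The same constness in plain user-space C (the __user sparse annotation dropped), mirroring count():

	/* counts the NULL-terminated entries; both indirection levels are read-only */
	static int count_args(const char *const *argv)
	{
		int n = 0;

		while (argv[n])
			n++;
		/* argv[n] = 0;      would not compile: the pointer slots are const   */
		/* argv[0][0] = 'X'; would not compile: the pointed-to chars are const */
		return n;
	}
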
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1fa23f6..1736f23 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -250,7 +250,9 @@
 {
 	int i, err = 0;
 
-	ll_rw_block(SWRITE, nr_bhs, bhs);
+	for (i = 0; i < nr_bhs; i++)
+		write_dirty_buffer(bhs[i], WRITE);
+
 	for (i = 0; i < nr_bhs; i++) {
 		wait_on_buffer(bhs[i]);
 		if (buffer_eopnotsupp(bhs[i])) {
diff --git a/fs/file_table.c b/fs/file_table.c
index edecd36..a04bdd8 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -20,7 +20,9 @@
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
 #include <linux/sysctl.h>
+#include <linux/lglock.h>
 #include <linux/percpu_counter.h>
+#include <linux/percpu.h>
 #include <linux/ima.h>
 
 #include <asm/atomic.h>
@@ -32,8 +34,8 @@
 	.max_files = NR_FILE
 };
 
-/* public. Not pretty! */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+DECLARE_LGLOCK(files_lglock);
+DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
 static struct kmem_cache *filp_cachep __read_mostly;
@@ -249,7 +251,7 @@
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
 	put_pid(file->f_owner.pid);
-	file_kill(file);
+	file_sb_list_del(file);
 	if (file->f_mode & FMODE_WRITE)
 		drop_file_write_access(file);
 	file->f_path.dentry = NULL;
@@ -328,41 +330,107 @@
 	return file;
 }
 
-
 void put_filp(struct file *file)
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
-		file_kill(file);
+		file_sb_list_del(file);
 		file_free(file);
 	}
 }
 
-void file_move(struct file *file, struct list_head *list)
+static inline int file_list_cpu(struct file *file)
 {
-	if (!list)
-		return;
-	file_list_lock();
-	list_move(&file->f_u.fu_list, list);
-	file_list_unlock();
+#ifdef CONFIG_SMP
+	return file->f_sb_list_cpu;
+#else
+	return smp_processor_id();
+#endif
 }
 
-void file_kill(struct file *file)
+/* helper for file_sb_list_add to reduce ifdefs */
+static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
+{
+	struct list_head *list;
+#ifdef CONFIG_SMP
+	int cpu;
+	cpu = smp_processor_id();
+	file->f_sb_list_cpu = cpu;
+	list = per_cpu_ptr(sb->s_files, cpu);
+#else
+	list = &sb->s_files;
+#endif
+	list_add(&file->f_u.fu_list, list);
+}
+
+/**
+ * file_sb_list_add - add a file to the sb's file list
+ * @file: file to add
+ * @sb: sb to add it to
+ *
+ * Use this function to associate a file with the superblock of the inode it
+ * refers to.
+ */
+void file_sb_list_add(struct file *file, struct super_block *sb)
+{
+	lg_local_lock(files_lglock);
+	__file_sb_list_add(file, sb);
+	lg_local_unlock(files_lglock);
+}
+
+/**
+ * file_sb_list_del - remove a file from the sb's file list
+ * @file: file to remove
+ *
+ * Use this function to remove a file from its superblock.
+ */
+void file_sb_list_del(struct file *file)
 {
 	if (!list_empty(&file->f_u.fu_list)) {
-		file_list_lock();
+		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
 		list_del_init(&file->f_u.fu_list);
-		file_list_unlock();
+		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
 	}
 }
 
+#ifdef CONFIG_SMP
+
+/*
+ * These macros iterate all files on all CPUs for a given superblock.
+ * files_lglock must be held globally.
+ */
+#define do_file_list_for_each_entry(__sb, __file)		\
+{								\
+	int i;							\
+	for_each_possible_cpu(i) {				\
+		struct list_head *list;				\
+		list = per_cpu_ptr((__sb)->s_files, i);		\
+		list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry				\
+	}							\
+}
+
+#else
+
+#define do_file_list_for_each_entry(__sb, __file)		\
+{								\
+	struct list_head *list;					\
+	list = &(__sb)->s_files;				\
+	list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry				\
+}
+
+#endif
+
 int fs_may_remount_ro(struct super_block *sb)
 {
 	struct file *file;
-
 	/* Check that no files are currently opened for writing. */
-	file_list_lock();
-	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
+	lg_global_lock(files_lglock);
+	do_file_list_for_each_entry(sb, file) {
 		struct inode *inode = file->f_path.dentry->d_inode;
 
 		/* File with pending delete? */
@@ -372,11 +440,11 @@
 		/* Writeable file? */
 		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
 			goto too_bad;
-	}
-	file_list_unlock();
+	} while_file_list_for_each_entry;
+	lg_global_unlock(files_lglock);
 	return 1; /* Tis' cool bro. */
 too_bad:
-	file_list_unlock();
+	lg_global_unlock(files_lglock);
 	return 0;
 }
 
@@ -392,8 +460,8 @@
 	struct file *f;
 
 retry:
-	file_list_lock();
-	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
+	lg_global_lock(files_lglock);
+	do_file_list_for_each_entry(sb, f) {
 		struct vfsmount *mnt;
 		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
 		       continue;
@@ -408,16 +476,13 @@
 			continue;
 		file_release_write(f);
 		mnt = mntget(f->f_path.mnt);
-		file_list_unlock();
-		/*
-		 * This can sleep, so we can't hold
-		 * the file_list_lock() spinlock.
-		 */
+		/* This can sleep, so we can't hold the spinlock. */
+		lg_global_unlock(files_lglock);
 		mnt_drop_write(mnt);
 		mntput(mnt);
 		goto retry;
-	}
-	file_list_unlock();
+	} while_file_list_for_each_entry;
+	lg_global_unlock(files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -437,5 +502,6 @@
 	if (files_stat.max_files < NR_FILE)
 		files_stat.max_files = NR_FILE;
 	files_defer_init();
+	lg_lock_init(files_lglock);
 	percpu_counter_init(&nr_files, 0);
 } 
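
The fs/file_table.c rework above replaces the single global files_lock with an lglock (files_lglock) and, on SMP, per-CPU per-superblock file lists: the hot operations, adding or deleting one file, take only one CPU's lock (lg_local_lock()/lg_local_lock_cpu()), while the rare full walks in fs_may_remount_ro() and mark_files_ro() take every CPU's lock with lg_global_lock() and iterate through the do_file_list_for_each_entry()/while_file_list_for_each_entry wrappers. The walking shape, as a fragment in the same style as those two callers (sb and file supplied by the caller):

	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, file) {
		/* examine one struct file; no list mutation can happen here */
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
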
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 1ee40eb..ed45a9c 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -13,11 +13,11 @@
 {
 	struct path old_root;
 
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	old_root = fs->root;
 	fs->root = *path;
 	path_get(path);
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 	if (old_root.dentry)
 		path_put(&old_root);
 }
@@ -30,11 +30,11 @@
 {
 	struct path old_pwd;
 
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
 	path_get(path);
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
 		path_put(&old_pwd);
@@ -51,7 +51,7 @@
 		task_lock(p);
 		fs = p->fs;
 		if (fs) {
-			write_lock(&fs->lock);
+			spin_lock(&fs->lock);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
 				path_get(new_root);
@@ -64,7 +64,7 @@
 				fs->pwd = *new_root;
 				count++;
 			}
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 		}
 		task_unlock(p);
 	} while_each_thread(g, p);
@@ -87,10 +87,10 @@
 	if (fs) {
 		int kill;
 		task_lock(tsk);
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		tsk->fs = NULL;
 		kill = !--fs->users;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
 			free_fs_struct(fs);
@@ -104,7 +104,7 @@
 	if (fs) {
 		fs->users = 1;
 		fs->in_exec = 0;
-		rwlock_init(&fs->lock);
+		spin_lock_init(&fs->lock);
 		fs->umask = old->umask;
 		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
 	}
@@ -121,10 +121,10 @@
 		return -ENOMEM;
 
 	task_lock(current);
-	write_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	kill = !--fs->users;
 	current->fs = new_fs;
-	write_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 	task_unlock(current);
 
 	if (kill)
@@ -143,7 +143,7 @@
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
 	.users		= 1,
-	.lock		= __RW_LOCK_UNLOCKED(init_fs.lock),
+	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
 	.umask		= 0022,
 };
 
@@ -156,14 +156,14 @@
 
 		task_lock(current);
 
-		write_lock(&init_fs.lock);
+		spin_lock(&init_fs.lock);
 		init_fs.users++;
-		write_unlock(&init_fs.lock);
+		spin_unlock(&init_fs.lock);
 
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		current->fs = &init_fs;
 		kill = !--fs->users;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 
 		task_unlock(current);
 		if (kill)
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 99800e5..6bc9e3a 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -94,6 +94,7 @@
 			if (error < 0)
 				goto failed;
 			inode->i_mode = mode;
+			inode->i_ctime = CURRENT_TIME;
 			if (error == 0) {
 				posix_acl_release(acl);
 				acl = NULL;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index dd1e555..f7dc9b5 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -104,7 +104,7 @@
 		__putname(name);
 		return NULL;
 	}
-	strncpy(name, root, PATH_MAX);
+	strlcpy(name, root, PATH_MAX);
 	if (len > p - name) {
 		__putname(name);
 		return NULL;
@@ -876,7 +876,7 @@
 		char *path = dentry_name(dentry);
 		int err = -ENOMEM;
 		if (path) {
-			int err = hostfs_do_readlink(path, link, PATH_MAX);
+			err = hostfs_do_readlink(path, link, PATH_MAX);
 			if (err == PATH_MAX)
 				err = -E2BIG;
 			__putname(path);
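
The hostfs change above is the classic strncpy()/strlcpy() distinction: strncpy(dst, src, n) leaves dst without a terminating NUL whenever strlen(src) >= n, while strlcpy() always terminates the destination (truncating the copy if necessary). A two-line illustration with a deliberately small buffer:

	char buf[8];

	strncpy(buf, "0123456789", sizeof(buf));	/* buf holds "01234567", no '\0' */
	strlcpy(buf, "0123456789", sizeof(buf));	/* buf holds "0123456" plus '\0' */
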
diff --git a/fs/internal.h b/fs/internal.h
index 6b706bc..a6910e9 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/lglock.h>
+
 struct super_block;
 struct linux_binprm;
 struct path;
@@ -70,7 +72,8 @@
 
 extern void __init mnt_init(void);
 
-extern spinlock_t vfsmount_lock;
+DECLARE_BRLOCK(vfsmount_lock);
+
 
 /*
  * fs_struct.c
@@ -80,6 +83,8 @@
 /*
  * file_table.c
  */
+extern void file_sb_list_add(struct file *f, struct super_block *sb);
+extern void file_sb_list_del(struct file *f);
 extern void mark_files_ro(struct super_block *);
 extern struct file *get_empty_filp(void);
 
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index b0435dd..05a38b9 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -254,7 +254,9 @@
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, bhs);
+	for (i = 0; i < *batch_count; i++)
+		write_dirty_buffer(bhs[i], WRITE);
+
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = bhs[i];
 		clear_buffer_jwrite(bh);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 28a9dda..95d8c11 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -119,7 +119,6 @@
 	struct buffer_head *bh;
 	journal_header_t *header;
 	int ret;
-	int barrier_done = 0;
 
 	if (is_journal_aborted(journal))
 		return 0;
@@ -137,34 +136,36 @@
 
 	JBUFFER_TRACE(descriptor, "write commit block");
 	set_buffer_dirty(bh);
+
 	if (journal->j_flags & JFS_BARRIER) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = sync_dirty_buffer(bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
-	/* is it possible for another commit to fail at roughly
-	 * the same time as this one?  If so, we don't want to
-	 * trust the barrier flag in the super, but instead want
-	 * to remember if we sent a barrier request
-	 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
-		char b[BDEVNAME_SIZE];
+		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-		printk(KERN_WARNING
-			"JBD: barrier-based sync failed on %s - "
-			"disabling barriers\n",
-			bdevname(journal->j_dev, b));
-		spin_lock(&journal->j_state_lock);
-		journal->j_flags &= ~JFS_BARRIER;
-		spin_unlock(&journal->j_state_lock);
+		/*
+		 * Is it possible for another commit to fail at roughly
+		 * the same time as this one?  If so, we don't want to
+		 * trust the barrier flag in the super, but instead want
+		 * to remember if we sent a barrier request
+		 */
+		if (ret == -EOPNOTSUPP) {
+			char b[BDEVNAME_SIZE];
 
-		/* And try again, without the barrier */
-		set_buffer_uptodate(bh);
-		set_buffer_dirty(bh);
+			printk(KERN_WARNING
+				"JBD: barrier-based sync failed on %s - "
+				"disabling barriers\n",
+				bdevname(journal->j_dev, b));
+			spin_lock(&journal->j_state_lock);
+			journal->j_flags &= ~JFS_BARRIER;
+			spin_unlock(&journal->j_state_lock);
+
+			/* And try again, without the barrier */
+			set_buffer_uptodate(bh);
+			set_buffer_dirty(bh);
+			ret = sync_dirty_buffer(bh);
+		}
+	} else {
 		ret = sync_dirty_buffer(bh);
 	}
+
 	put_bh(bh);		/* One for getblk() */
 	journal_put_journal_head(descriptor);
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f19ce94..2c4b1f1 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1024,7 +1024,7 @@
 	if (wait)
 		sync_dirty_buffer(bh);
 	else
-		ll_rw_block(SWRITE, 1, &bh);
+		write_dirty_buffer(bh, WRITE);
 
 out:
 	/* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index ad71732..d290183 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -617,7 +617,7 @@
 	set_buffer_jwrite(bh);
 	BUFFER_TRACE(bh, "write");
 	set_buffer_dirty(bh);
-	ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+	write_dirty_buffer(bh, write_op);
 }
 #endif
 
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 1c23a0f4..5247e7f 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -255,7 +255,9 @@
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
+	for (i = 0; i < *batch_count; i++)
+		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = journal->j_chkpt_bhs[i];
 		clear_buffer_jwrite(bh);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f52e5e8..7c068c1 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -101,7 +101,6 @@
 	struct commit_header *tmp;
 	struct buffer_head *bh;
 	int ret;
-	int barrier_done = 0;
 	struct timespec now = current_kernel_time();
 
 	if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = submit_bh(WRITE_SYNC_PLUG, bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
+		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+		if (ret == -EOPNOTSUPP) {
+			printk(KERN_WARNING
+			       "JBD2: Disabling barriers on %s, "
+			       "not supported by device\n", journal->j_devname);
+			write_lock(&journal->j_state_lock);
+			journal->j_flags &= ~JBD2_BARRIER;
+			write_unlock(&journal->j_state_lock);
 
-	/* is it possible for another commit to fail at roughly
-	 * the same time as this one?  If so, we don't want to
-	 * trust the barrier flag in the super, but instead want
-	 * to remember if we sent a barrier request
-	 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
-		printk(KERN_WARNING
-		       "JBD2: Disabling barriers on %s, "
-		       "not supported by device\n", journal->j_devname);
-		write_lock(&journal->j_state_lock);
-		journal->j_flags &= ~JBD2_BARRIER;
-		write_unlock(&journal->j_state_lock);
-
-		/* And try again, without the barrier */
-		lock_buffer(bh);
-		set_buffer_uptodate(bh);
-		clear_buffer_dirty(bh);
+			/* And try again, without the barrier */
+			lock_buffer(bh);
+			set_buffer_uptodate(bh);
+			clear_buffer_dirty(bh);
+			ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		}
+	} else {
 		ret = submit_bh(WRITE_SYNC_PLUG, bh);
 	}
 	*cbh = bh;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ad5866a..0e8014e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1124,7 +1124,7 @@
 			set_buffer_uptodate(bh);
 		}
 	} else
-		ll_rw_block(SWRITE, 1, &bh);
+		write_dirty_buffer(bh, WRITE);
 
 out:
 	/* If we have just flushed the log (by marking s_start==0), then
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index a360b06..9ad321f 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -625,7 +625,7 @@
 	set_buffer_jwrite(bh);
 	BUFFER_TRACE(bh, "write");
 	set_buffer_dirty(bh);
-	ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
+	write_dirty_buffer(bh, write_op);
 }
 #endif
 
diff --git a/fs/mbcache.c b/fs/mbcache.c
index cf4e6cd..9344474 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -80,6 +80,7 @@
 	struct list_head		c_cache_list;
 	const char			*c_name;
 	atomic_t			c_entry_count;
+	int				c_max_entries;
 	int				c_bucket_bits;
 	struct kmem_cache		*c_entry_cache;
 	struct list_head		*c_block_hash;
@@ -243,6 +244,12 @@
 	if (!cache->c_entry_cache)
 		goto fail2;
 
+	/*
+	 * Set an upper limit on the number of cache entries so that the hash
+	 * chains won't grow too long.
+	 */
+	cache->c_max_entries = bucket_count << 4;
+
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@
 	kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-	struct mb_cache_entry *ce;
+	struct mb_cache_entry *ce = NULL;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-	if (ce) {
+	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+		spin_lock(&mb_cache_spinlock);
+		if (!list_empty(&mb_cache_lru_list)) {
+			ce = list_entry(mb_cache_lru_list.next,
+					struct mb_cache_entry, e_lru_list);
+			list_del_init(&ce->e_lru_list);
+			__mb_cache_entry_unhash(ce);
+		}
+		spin_unlock(&mb_cache_spinlock);
+	}
+	if (!ce) {
+		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+		if (!ce)
+			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
-		ce->e_used = 1 + MB_CACHE_WRITER;
 		ce->e_queued = 0;
 	}
+	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
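
The mbcache hunks above cap the cache at c_max_entries = bucket_count << 4, an average of at most 16 entries per hash bucket (for example, a cache created with 512 buckets tops out at 8192 entries), and make mb_cache_entry_alloc() recycle the oldest entry from the LRU list once the cap is hit instead of allocating yet another one. Condensed, with locking and the unhash details elided and reuse_oldest_lru_entry() as a hypothetical helper, the decision is:

	ce = NULL;
	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries)
		ce = reuse_oldest_lru_entry(cache);	/* take it off the LRU and unhash it */
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		/* fresh entry: bump c_entry_count, init list heads, as in the hunk above */
	}
	ce->e_used = 1 + MB_CACHE_WRITER;		/* done for fresh and recycled entries alike */
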
 
diff --git a/fs/namei.c b/fs/namei.c
index 17ea76bf..24896e8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -595,15 +595,16 @@
 {
 	struct vfsmount *parent;
 	struct dentry *mountpoint;
-	spin_lock(&vfsmount_lock);
+
+	br_read_lock(vfsmount_lock);
 	parent = path->mnt->mnt_parent;
 	if (parent == path->mnt) {
-		spin_unlock(&vfsmount_lock);
+		br_read_unlock(vfsmount_lock);
 		return 0;
 	}
 	mntget(parent);
 	mountpoint = dget(path->mnt->mnt_mountpoint);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	dput(path->dentry);
 	path->dentry = mountpoint;
 	mntput(path->mnt);
@@ -686,6 +687,35 @@
 }
 
 /*
+ * Allocate a dentry with name and parent, and perform a parent
+ * directory ->lookup on it. Returns the new dentry, or ERR_PTR
+ * on error. parent->d_inode->i_mutex must be held. d_lookup must
+ * have verified that no child exists while under i_mutex.
+ */
+static struct dentry *d_alloc_and_lookup(struct dentry *parent,
+				struct qstr *name, struct nameidata *nd)
+{
+	struct inode *inode = parent->d_inode;
+	struct dentry *dentry;
+	struct dentry *old;
+
+	/* Don't create child dentry for a dead directory. */
+	if (unlikely(IS_DEADDIR(inode)))
+		return ERR_PTR(-ENOENT);
+
+	dentry = d_alloc(parent, name);
+	if (unlikely(!dentry))
+		return ERR_PTR(-ENOMEM);
+
+	old = inode->i_op->lookup(inode, dentry, nd);
+	if (unlikely(old)) {
+		dput(dentry);
+		dentry = old;
+	}
+	return dentry;
+}
+
+/*
  *  It's more convoluted than I'd like it to be, but... it's still fairly
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
@@ -706,9 +736,15 @@
 			return err;
 	}
 
+	/*
+	 * Rename seqlock is not required here because in the off chance
+	 * of a false negative due to a concurrent rename, we're going to
+	 * do the non-racy lookup, below.
+	 */
 	dentry = __d_lookup(nd->path.dentry, name);
 	if (!dentry)
 		goto need_lookup;
+found:
 	if (dentry->d_op && dentry->d_op->d_revalidate)
 		goto need_revalidate;
 done:
@@ -724,56 +760,28 @@
 	mutex_lock(&dir->i_mutex);
 	/*
 	 * First re-do the cached lookup just in case it was created
-	 * while we waited for the directory semaphore..
+	 * while we waited for the directory semaphore, or the first
+	 * lookup failed due to an unrelated rename.
 	 *
-	 * FIXME! This could use version numbering or similar to
-	 * avoid unnecessary cache lookups.
-	 *
-	 * The "dcache_lock" is purely to protect the RCU list walker
-	 * from concurrent renames at this point (we mustn't get false
-	 * negatives from the RCU list walk here, unlike the optimistic
-	 * fast walk).
-	 *
-	 * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
+	 * This could use version numbering or similar to avoid unnecessary
+	 * cache lookups, but then we'd have to do the first lookup in the
+	 * non-racy way. However in the common case here, everything should
+	 * be hot in cache, so would it be a big win?
 	 */
 	dentry = d_lookup(parent, name);
-	if (!dentry) {
-		struct dentry *new;
-
-		/* Don't create child dentry for a dead directory. */
-		dentry = ERR_PTR(-ENOENT);
-		if (IS_DEADDIR(dir))
-			goto out_unlock;
-
-		new = d_alloc(parent, name);
-		dentry = ERR_PTR(-ENOMEM);
-		if (new) {
-			dentry = dir->i_op->lookup(dir, new, nd);
-			if (dentry)
-				dput(new);
-			else
-				dentry = new;
-		}
-out_unlock:
+	if (likely(!dentry)) {
+		dentry = d_alloc_and_lookup(parent, name, nd);
 		mutex_unlock(&dir->i_mutex);
 		if (IS_ERR(dentry))
 			goto fail;
 		goto done;
 	}
-
 	/*
 	 * Uhhuh! Nasty case: the cache was re-populated while
 	 * we waited on the semaphore. Need to revalidate.
 	 */
 	mutex_unlock(&dir->i_mutex);
-	if (dentry->d_op && dentry->d_op->d_revalidate) {
-		dentry = do_revalidate(dentry, nd);
-		if (!dentry)
-			dentry = ERR_PTR(-ENOENT);
-	}
-	if (IS_ERR(dentry))
-		goto fail;
-	goto done;
+	goto found;
 
 need_revalidate:
 	dentry = do_revalidate(dentry, nd);
@@ -1130,35 +1138,18 @@
 			goto out;
 	}
 
-	dentry = __d_lookup(base, name);
-
-	/* lockess __d_lookup may fail due to concurrent d_move()
-	 * in some unrelated directory, so try with d_lookup
+	/*
+	 * Don't bother with __d_lookup: callers are for creat as
+	 * well as unlink, so a lot of the time it would cost
+	 * a double lookup.
 	 */
-	if (!dentry)
-		dentry = d_lookup(base, name);
+	dentry = d_lookup(base, name);
 
 	if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
 		dentry = do_revalidate(dentry, nd);
 
-	if (!dentry) {
-		struct dentry *new;
-
-		/* Don't create child dentry for a dead directory. */
-		dentry = ERR_PTR(-ENOENT);
-		if (IS_DEADDIR(inode))
-			goto out;
-
-		new = d_alloc(base, name);
-		dentry = ERR_PTR(-ENOMEM);
-		if (!new)
-			goto out;
-		dentry = inode->i_op->lookup(inode, new, nd);
-		if (!dentry)
-			dentry = new;
-		else
-			dput(new);
-	}
+	if (!dentry)
+		dentry = d_alloc_and_lookup(base, name, nd);
 out:
 	return dentry;
 }
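
The fs/namei.c rework above leans on the split documented in the dcache hunks: __d_lookup() is the fast lockless lookup that can, rarely, return a false negative while an unrelated rename is in flight, and d_lookup() is the rename_lock-protected variant that cannot. The lookup fast path therefore tries the racy lookup first and, only on a miss, takes i_mutex and repeats with d_lookup() before falling back to d_alloc_and_lookup(). The shape of that fallback, as a fragment built from the functions in the hunks above (revalidation and error handling elided):

	dentry = __d_lookup(parent, name);		/* lockless; may miss during a rename */
	if (!dentry) {
		mutex_lock(&dir->i_mutex);
		dentry = d_lookup(parent, name);	/* seqlock-protected: no false negatives */
		if (!dentry)
			dentry = d_alloc_and_lookup(parent, name, nd);
		mutex_unlock(&dir->i_mutex);
	}
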
diff --git a/fs/namespace.c b/fs/namespace.c
index 2e10cb1..de402eb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -11,6 +11,8 @@
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -38,12 +40,10 @@
 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
 #define HASH_SIZE (1UL << HASH_SHIFT)
 
-/* spinlock for vfsmount related operations, inplace of dcache_lock */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
-
 static int event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
+static DEFINE_SPINLOCK(mnt_id_lock);
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
@@ -55,6 +55,16 @@
 struct kobject *fs_kobj;
 EXPORT_SYMBOL_GPL(fs_kobj);
 
+/*
+ * vfsmount lock may be taken for read to prevent changes to the
+ * vfsmount hash, ie. during mountpoint lookups or walking back
+ * up the tree.
+ *
+ * It should be taken for write in all cases where the vfsmount
+ * tree or hash is modified or when a vfsmount structure is modified.
+ */
+DEFINE_BRLOCK(vfsmount_lock);
+
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
 	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -65,18 +75,21 @@
 
 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
 
-/* allocation is serialized by namespace_sem */
+/*
+ * allocation is serialized by namespace_sem, but we need the spinlock to
+ * serialize with freeing.
+ */
 static int mnt_alloc_id(struct vfsmount *mnt)
 {
 	int res;
 
 retry:
 	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-	spin_lock(&vfsmount_lock);
+	spin_lock(&mnt_id_lock);
 	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
 	if (!res)
 		mnt_id_start = mnt->mnt_id + 1;
-	spin_unlock(&vfsmount_lock);
+	spin_unlock(&mnt_id_lock);
 	if (res == -EAGAIN)
 		goto retry;
 
@@ -86,11 +99,11 @@
 static void mnt_free_id(struct vfsmount *mnt)
 {
 	int id = mnt->mnt_id;
-	spin_lock(&vfsmount_lock);
+	spin_lock(&mnt_id_lock);
 	ida_remove(&mnt_id_ida, id);
 	if (mnt_id_start > id)
 		mnt_id_start = id;
-	spin_unlock(&vfsmount_lock);
+	spin_unlock(&mnt_id_lock);
 }
 
 /*
@@ -348,7 +361,7 @@
 {
 	int ret = 0;
 
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	mnt->mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -382,15 +395,15 @@
 	 */
 	smp_wmb();
 	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct vfsmount *mnt)
 {
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	mnt->mnt_flags &= ~MNT_READONLY;
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 }
 
 void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
@@ -414,6 +427,7 @@
 /*
  * find the first or last mount at @dentry on vfsmount @mnt depending on
  * @dir. If @dir is set return the first mount else return the last mount.
+ * vfsmount_lock must be held for read or write.
  */
 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
 			      int dir)
@@ -443,10 +457,11 @@
 struct vfsmount *lookup_mnt(struct path *path)
 {
 	struct vfsmount *child_mnt;
-	spin_lock(&vfsmount_lock);
+
+	br_read_lock(vfsmount_lock);
 	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
 		mntget(child_mnt);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	return child_mnt;
 }
 
@@ -455,6 +470,9 @@
 	return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void touch_mnt_namespace(struct mnt_namespace *ns)
 {
 	if (ns) {
@@ -463,6 +481,9 @@
 	}
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void __touch_mnt_namespace(struct mnt_namespace *ns)
 {
 	if (ns && ns->event != event) {
@@ -471,6 +492,9 @@
 	}
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
 	old_path->dentry = mnt->mnt_mountpoint;
@@ -482,6 +506,9 @@
 	old_path->dentry->d_mounted--;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
 			struct vfsmount *child_mnt)
 {
@@ -490,6 +517,9 @@
 	dentry->d_mounted++;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
 	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
@@ -499,7 +529,7 @@
 }
 
 /*
- * the caller must hold vfsmount_lock
+ * vfsmount lock must be held for write
  */
 static void commit_tree(struct vfsmount *mnt)
 {
@@ -623,39 +653,43 @@
 void mntput_no_expire(struct vfsmount *mnt)
 {
 repeat:
-	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
-		if (likely(!mnt->mnt_pinned)) {
-			spin_unlock(&vfsmount_lock);
-			__mntput(mnt);
-			return;
-		}
-		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
-		mnt->mnt_pinned = 0;
-		spin_unlock(&vfsmount_lock);
-		acct_auto_close_mnt(mnt);
-		goto repeat;
+	if (atomic_add_unless(&mnt->mnt_count, -1, 1))
+		return;
+	br_write_lock(vfsmount_lock);
+	if (!atomic_dec_and_test(&mnt->mnt_count)) {
+		br_write_unlock(vfsmount_lock);
+		return;
 	}
+	if (likely(!mnt->mnt_pinned)) {
+		br_write_unlock(vfsmount_lock);
+		__mntput(mnt);
+		return;
+	}
+	atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+	mnt->mnt_pinned = 0;
+	br_write_unlock(vfsmount_lock);
+	acct_auto_close_mnt(mnt);
+	goto repeat;
 }
-
 EXPORT_SYMBOL(mntput_no_expire);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	mnt->mnt_pinned++;
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *mnt)
 {
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	if (mnt->mnt_pinned) {
 		atomic_inc(&mnt->mnt_count);
 		mnt->mnt_pinned--;
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 }
 
 EXPORT_SYMBOL(mnt_unpin);
@@ -746,12 +780,12 @@
 	struct mnt_namespace *ns = p->ns;
 	int res = 0;
 
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	if (p->event != ns->event) {
 		p->event = ns->event;
 		res = 1;
 	}
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 
 	return res;
 }
@@ -952,12 +986,12 @@
 	int minimum_refs = 0;
 	struct vfsmount *p;
 
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += atomic_read(&p->mnt_count);
 		minimum_refs += 2;
 	}
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -984,10 +1018,10 @@
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	if (propagate_mount_busy(mnt, 2))
 		ret = 0;
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1003,13 +1037,14 @@
 		if (mnt->mnt_parent != mnt) {
 			struct dentry *dentry;
 			struct vfsmount *m;
-			spin_lock(&vfsmount_lock);
+
+			br_write_lock(vfsmount_lock);
 			dentry = mnt->mnt_mountpoint;
 			m = mnt->mnt_parent;
 			mnt->mnt_mountpoint = mnt->mnt_root;
 			mnt->mnt_parent = mnt;
 			m->mnt_ghosts--;
-			spin_unlock(&vfsmount_lock);
+			br_write_unlock(vfsmount_lock);
 			dput(dentry);
 			mntput(m);
 		}
@@ -1017,6 +1052,10 @@
 	}
 }
 
+/*
+ * vfsmount lock must be held for write
+ * namespace_sem must be held for write
+ */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
 	struct vfsmount *p;
@@ -1107,7 +1146,7 @@
 	}
 
 	down_write(&namespace_sem);
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	event++;
 
 	if (!(flags & MNT_DETACH))
@@ -1119,7 +1158,7 @@
 			umount_tree(mnt, 1, &umount_list);
 		retval = 0;
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	return retval;
@@ -1231,19 +1270,19 @@
 			q = clone_mnt(p, p->mnt_root, flag);
 			if (!q)
 				goto Enomem;
-			spin_lock(&vfsmount_lock);
+			br_write_lock(vfsmount_lock);
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, &path);
-			spin_unlock(&vfsmount_lock);
+			br_write_unlock(vfsmount_lock);
 		}
 	}
 	return res;
 Enomem:
 	if (res) {
 		LIST_HEAD(umount_list);
-		spin_lock(&vfsmount_lock);
+		br_write_lock(vfsmount_lock);
 		umount_tree(res, 0, &umount_list);
-		spin_unlock(&vfsmount_lock);
+		br_write_unlock(vfsmount_lock);
 		release_mounts(&umount_list);
 	}
 	return NULL;
@@ -1262,9 +1301,9 @@
 {
 	LIST_HEAD(umount_list);
 	down_write(&namespace_sem);
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	umount_tree(mnt, 0, &umount_list);
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 }
@@ -1392,7 +1431,7 @@
 	if (err)
 		goto out_cleanup_ids;
 
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1411,7 +1450,8 @@
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
+
 	return 0;
 
  out_cleanup_ids:
@@ -1466,10 +1506,10 @@
 			goto out_unlock;
 	}
 
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 
  out_unlock:
 	up_write(&namespace_sem);
@@ -1513,9 +1553,10 @@
 	err = graft_tree(mnt, path);
 	if (err) {
 		LIST_HEAD(umount_list);
-		spin_lock(&vfsmount_lock);
+
+		br_write_lock(vfsmount_lock);
 		umount_tree(mnt, 0, &umount_list);
-		spin_unlock(&vfsmount_lock);
+		br_write_unlock(vfsmount_lock);
 		release_mounts(&umount_list);
 	}
 
@@ -1568,16 +1609,16 @@
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		spin_lock(&vfsmount_lock);
+		br_write_lock(vfsmount_lock);
 		mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
 		path->mnt->mnt_flags = mnt_flags;
-		spin_unlock(&vfsmount_lock);
+		br_write_unlock(vfsmount_lock);
 	}
 	up_write(&sb->s_umount);
 	if (!err) {
-		spin_lock(&vfsmount_lock);
+		br_write_lock(vfsmount_lock);
 		touch_mnt_namespace(path->mnt->mnt_ns);
-		spin_unlock(&vfsmount_lock);
+		br_write_unlock(vfsmount_lock);
 	}
 	return err;
 }
@@ -1754,7 +1795,7 @@
 		return;
 
 	down_write(&namespace_sem);
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -1773,7 +1814,7 @@
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1, &umounts);
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
 
 	release_mounts(&umounts);
@@ -1830,6 +1871,8 @@
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
+ *
+ * vfsmount_lock must be held for write
  */
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
@@ -2048,9 +2091,9 @@
 		kfree(new_ns);
 		return ERR_PTR(-ENOMEM);
 	}
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 
 	/*
 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2244,7 +2287,7 @@
 		goto out2; /* not attached */
 	/* make sure we can reach put_old from new_root */
 	tmp = old.mnt;
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	if (tmp != new.mnt) {
 		for (;;) {
 			if (tmp->mnt_parent == tmp)
@@ -2264,7 +2307,7 @@
 	/* mount new_root on / */
 	attach_mnt(new.mnt, &root_parent);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	chroot_fs_refs(&root, &new);
 	error = 0;
 	path_put(&root_parent);
@@ -2279,7 +2322,7 @@
 out0:
 	return error;
 out3:
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	goto out2;
 }
 
@@ -2326,6 +2369,8 @@
 	for (u = 0; u < HASH_SIZE; u++)
 		INIT_LIST_HEAD(&mount_hashtable[u]);
 
+	br_lock_init(vfsmount_lock);
+
 	err = sysfs_init();
 	if (err)
 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2344,9 +2389,9 @@
 	if (!atomic_dec_and_test(&ns->count))
 		return;
 	down_write(&namespace_sem);
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	umount_tree(ns->root, 0, &umount_list);
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
 	kfree(ns);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 26a510a..6c2aad4 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,7 +63,6 @@
 config NFS_V4
 	bool "NFS client support for NFS version 4"
 	depends on NFS_FS
-	select RPCSEC_GSS_KRB5
 	help
 	  This option enables support for version 4 of the NFS protocol
 	  (RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 29539ce..e257172 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -140,6 +140,13 @@
 
 	/* Call generic open code in order to cache credentials */
 	res = nfs_open(inode, filp);
+	if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
+		/* This is a mountpoint, so d_revalidate will never
+		 * have been called, so we need to refresh the
+		 * inode (for close-open consistency) ourselves.
+		 */
+		__nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	}
 	return res;
 }
 
@@ -1103,7 +1110,7 @@
 	if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
 		goto no_open_dput;
 	/* We can't create new files, or truncate existing ones here */
-	openflags &= ~(O_CREAT|O_TRUNC);
+	openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
 
 	/*
 	 * Note: we're not holding inode->i_mutex and so may be racing with
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d141a7..eb51bd6 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -323,7 +323,7 @@
 	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 	if (have_error)
 		ret = xchg(&ctx->error, 0);
-	if (!ret)
+	if (!ret && status < 0)
 		ret = status;
 	return ret;
 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7ffbb98..089da5b5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2036,7 +2036,8 @@
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
 	struct dentry *res;
-	fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+	int open_flags = nd->intent.open.flags;
+	fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
 
 	if (nd->flags & LOOKUP_CREATE) {
 		attr.ia_mode = nd->intent.open.create_mode;
@@ -2044,8 +2045,9 @@
 		if (!IS_POSIXACL(dir))
 			attr.ia_mode &= ~current_umask();
 	} else {
+		open_flags &= ~O_EXCL;
 		attr.ia_valid = 0;
-		BUG_ON(nd->intent.open.flags & O_CREAT);
+		BUG_ON(open_flags & O_CREAT);
 	}
 
 	cred = rpc_lookup_cred();
@@ -2054,7 +2056,7 @@
 	parent = dentry->d_parent;
 	/* Protect against concurrent sillydeletes */
 	nfs_block_sillyrename(parent);
-	state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
+	state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
 	put_rpccred(cred);
 	if (IS_ERR(state)) {
 		if (PTR_ERR(state) == -ENOENT) {
@@ -2273,8 +2275,7 @@
 out:
 	if (page)
 		__free_page(page);
-	if (locations)
-		kfree(locations);
+	kfree(locations);
 	return status;
 }
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ee26316a..ec3966e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -655,6 +655,13 @@
 
 	if (nfss->options & NFS_OPTION_FSCACHE)
 		seq_printf(m, ",fsc");
+
+	if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
+		if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+			seq_printf(m, ",lookupcache=none");
+		else
+			seq_printf(m, ",lookupcache=pos");
+	}
 }
 
 /*
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 503b9da..95932f5 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,7 +69,6 @@
 	depends on NFSD && PROC_FS && EXPERIMENTAL
 	select NFSD_V3
 	select FS_POSIX_ACL
-	select RPCSEC_GSS_KRB5
 	help
 	  This option enables support in your system's NFS server for
 	  version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1fa86b9..9222633 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -175,24 +175,24 @@
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err;
-	int barrier_done = 0;
 
-	if (nilfs_test_opt(sbi, BARRIER)) {
-		set_buffer_ordered(nilfs->ns_sbh[0]);
-		barrier_done = 1;
-	}
  retry:
 	set_buffer_dirty(nilfs->ns_sbh[0]);
-	err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-	if (err == -EOPNOTSUPP && barrier_done) {
-		nilfs_warning(sbi->s_super, __func__,
-			      "barrier-based sync failed. "
-			      "disabling barriers\n");
-		nilfs_clear_opt(sbi, BARRIER);
-		barrier_done = 0;
-		clear_buffer_ordered(nilfs->ns_sbh[0]);
-		goto retry;
+
+	if (nilfs_test_opt(sbi, BARRIER)) {
+		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+					  WRITE_SYNC | WRITE_BARRIER);
+		if (err == -EOPNOTSUPP) {
+			nilfs_warning(sbi->s_super, __func__,
+				      "barrier-based sync failed. "
+				      "disabling barriers\n");
+			nilfs_clear_opt(sbi, BARRIER);
+			goto retry;
+		}
+	} else {
+		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
 	}
+
 	if (unlikely(err)) {
 		printk(KERN_ERR
 		       "NILFS: unable to write superblock (err=%d)\n", err);
@@ -400,9 +400,10 @@
 	list_add(&sbi->s_list, &nilfs->ns_supers);
 	up_write(&nilfs->ns_super_sem);
 
+	err = -ENOMEM;
 	sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
 	if (!sbi->s_ifile)
-		return -ENOMEM;
+		goto delist;
 
 	down_read(&nilfs->ns_segctor_sem);
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
@@ -433,6 +434,7 @@
 	nilfs_mdt_destroy(sbi->s_ifile);
 	sbi->s_ifile = NULL;
 
+ delist:
 	down_write(&nilfs->ns_super_sem);
 	list_del_init(&sbi->s_list);
 	up_write(&nilfs->ns_super_sem);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 37de1f0..4317f17 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -608,11 +608,11 @@
 		return -EINVAL;
 	}
 
-	if (swp) {
+	if (!valid[!swp])
 		printk(KERN_WARNING "NILFS warning: broken superblock. "
 		       "using spare superblock.\n");
+	if (swp)
 		nilfs_swap_super_block(nilfs);
-	}
 
 	nilfs->ns_sbwcount = 0;
 	nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
@@ -775,6 +775,7 @@
 						   start * sects_per_block,
 						   nblocks * sects_per_block,
 						   GFP_NOFS,
+						   BLKDEV_IFL_WAIT |
 						   BLKDEV_IFL_BARRIER);
 			if (ret < 0)
 				return ret;
@@ -785,7 +786,8 @@
 		ret = blkdev_issue_discard(nilfs->ns_bdev,
 					   start * sects_per_block,
 					   nblocks * sects_per_block,
-					   GFP_NOFS, BLKDEV_IFL_BARRIER);
+					   GFP_NOFS,
+					  BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 	return ret;
 }
 
diff --git a/fs/open.c b/fs/open.c
index 630715f..d74e198 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -675,7 +675,7 @@
 	f->f_path.mnt = mnt;
 	f->f_pos = 0;
 	f->f_op = fops_get(inode->i_fop);
-	file_move(f, &inode->i_sb->s_files);
+	file_sb_list_add(f, inode->i_sb);
 
 	error = security_dentry_open(f, cred);
 	if (error)
@@ -721,7 +721,7 @@
 			mnt_drop_write(mnt);
 		}
 	}
-	file_kill(f);
+	file_sb_list_del(f);
 	f->f_path.dentry = NULL;
 	f->f_path.mnt = NULL;
 cleanup_file:
diff --git a/fs/pnode.c b/fs/pnode.c
index 5cc564a..8066b8d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -126,6 +126,9 @@
 	return 0;
 }
 
+/*
+ * vfsmount lock must be held for write
+ */
 void change_mnt_propagation(struct vfsmount *mnt, int type)
 {
 	if (type == MS_SHARED) {
@@ -270,12 +273,12 @@
 		prev_src_mnt  = child;
 	}
 out:
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	while (!list_empty(&tmp_list)) {
 		child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
 		umount_tree(child, 0, &umount_list);
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	release_mounts(&umount_list);
 	return ret;
 }
@@ -296,6 +299,8 @@
  * other mounts its parent propagates to.
  * Check if any of these mounts that **do not have submounts**
  * have more references than 'refcnt'. If so return busy.
+ *
+ * vfsmount lock must be held for read or write
  */
 int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
 {
@@ -353,6 +358,8 @@
  * collect all mounts that receive propagation from the mount in @list,
  * and return these additional mounts in the same list.
  * @list: the list of mounts to be unmounted.
+ *
+ * vfsmount lock must be held for write
  */
 int propagate_umount(struct list_head *list)
 {
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae35413..caa7583 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -83,6 +83,7 @@
 	dquot_drop(inode);
 	inode->i_blocks = 0;
 	reiserfs_write_unlock_once(inode->i_sb, depth);
+	return;
 
 no_delete:
 	end_writeback(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 1ec952b..812e2c0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2311,7 +2311,7 @@
 	/* flush out the real blocks */
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 		set_buffer_dirty(real_blocks[i]);
-		ll_rw_block(SWRITE, 1, real_blocks + i);
+		write_dirty_buffer(real_blocks[i], WRITE);
 	}
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 		wait_on_buffer(real_blocks[i]);
diff --git a/fs/super.c b/fs/super.c
index 9674ab2..8819e3a 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -54,7 +54,22 @@
 			s = NULL;
 			goto out;
 		}
+#ifdef CONFIG_SMP
+		s->s_files = alloc_percpu(struct list_head);
+		if (!s->s_files) {
+			security_sb_free(s);
+			kfree(s);
+			s = NULL;
+			goto out;
+		} else {
+			int i;
+
+			for_each_possible_cpu(i)
+				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
+		}
+#else
 		INIT_LIST_HEAD(&s->s_files);
+#endif
 		INIT_LIST_HEAD(&s->s_instances);
 		INIT_HLIST_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
@@ -108,6 +123,9 @@
  */
 static inline void destroy_super(struct super_block *s)
 {
+#ifdef CONFIG_SMP
+	free_percpu(s->s_files);
+#endif
 	security_sb_free(s);
 	kfree(s->s_subtype);
 	kfree(s->s_options);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 048484f..46f7a80 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -114,10 +114,8 @@
 	
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 	
 	unlock_super (sb);
@@ -207,10 +205,8 @@
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 
 	if (overflow) {
 		fragment += count;
@@ -558,10 +554,8 @@
 	
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -680,10 +674,8 @@
 succed:
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	result += cgno * uspi->s_fpg;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 428017e..2eabf04 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -113,10 +113,8 @@
 
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	
 	sb->s_dirt = 1;
 	unlock_super (sb);
@@ -156,10 +154,8 @@
 
 	fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
 	ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer(UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 
 	UFSD("EXIT\n");
 }
@@ -290,10 +286,8 @@
 	}
 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
-		ubh_wait_on_buffer (UCPI_UBH(ucpi));
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		ubh_sync_block(UCPI_UBH(ucpi));
 	sb->s_dirt = 1;
 
 	inode->i_ino = cg * uspi->s_ipg + bit;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 34d5cb1..a58f915 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -243,10 +243,8 @@
 		ubh_bforget(ind_ubh);
 		ind_ubh = NULL;
 	}
-	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
-		ubh_ll_rw_block(SWRITE, ind_ubh);
-		ubh_wait_on_buffer (ind_ubh);
-	}
+	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
+		ubh_sync_block(ind_ubh);
 	ubh_brelse (ind_ubh);
 	
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -307,10 +305,8 @@
 		ubh_bforget(dind_bh);
 		dind_bh = NULL;
 	}
-	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
-		ubh_ll_rw_block(SWRITE, dind_bh);
-		ubh_wait_on_buffer (dind_bh);
-	}
+	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
+		ubh_sync_block(dind_bh);
 	ubh_brelse (dind_bh);
 	
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
@@ -367,10 +363,8 @@
 		ubh_bforget(tind_bh);
 		tind_bh = NULL;
 	}
-	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
-		ubh_ll_rw_block(SWRITE, tind_bh);
-		ubh_wait_on_buffer (tind_bh);
-	}
+	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
+		ubh_sync_block(tind_bh);
 	ubh_brelse (tind_bh);
 	
 	UFSD("EXIT: ino %lu\n", inode->i_ino);
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 85a7fc9..d2c36d5 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -113,21 +113,17 @@
 	}
 }
 
-void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
+void ubh_sync_block(struct ufs_buffer_head *ubh)
 {
-	if (!ubh)
-		return;
+	if (ubh) {
+		unsigned i;
 
-	ll_rw_block(rw, ubh->count, ubh->bh);
-}
+		for (i = 0; i < ubh->count; i++)
+			write_dirty_buffer(ubh->bh[i], WRITE);
 
-void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
-{
-	unsigned i;
-	if (!ubh)
-		return;
-	for ( i = 0; i < ubh->count; i++ )
-		wait_on_buffer (ubh->bh[i]);
+		for (i = 0; i < ubh->count; i++)
+			wait_on_buffer(ubh->bh[i]);
+	}
 }
 
 void ubh_bforget (struct ufs_buffer_head * ubh)
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 0466036..9f8775c 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -269,8 +269,7 @@
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
 extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
-extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
-extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern void ubh_sync_block(struct ufs_buffer_head *);
 extern void ubh_bforget (struct ufs_buffer_head *);
 extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
 #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index df84e3b0..d89dec8 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -23,8 +23,10 @@
 #endif
 
 #ifndef sys_execve
-asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
-			char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_execve(const char __user *filename,
+			   const char __user *const __user *argv,
+			   const char __user *const __user *envp,
+			   struct pt_regs *regs);
 #endif
 
 #ifndef sys_mmap2
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2a512bc..7809d230 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -305,14 +305,16 @@
 	unsigned int cmd;
 	int flags;
 	drm_ioctl_t *func;
+	unsigned int cmd_drv;
 };
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
  * ioctl, for use by drm_ioctl().
  */
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
+	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
 
 struct drm_magic_entry {
 	struct list_head head;
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
index 4b00d2d..61315c2 100644
--- a/include/drm/i830_drm.h
+++ b/include/drm/i830_drm.h
@@ -264,20 +264,20 @@
 #define DRM_I830_GETPARAM	0x0c
 #define DRM_I830_SETPARAM	0x0d
 
-#define DRM_IOCTL_I830_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF		DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP)
-#define DRM_IOCTL_I830_COPY		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t)
+#define DRM_IOCTL_I830_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF		DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
 
 typedef struct _drm_i830_clear {
 	int clear_color;
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8f8b072..e41c74f 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -215,6 +215,7 @@
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 3ffbc47..c16097f 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -248,7 +248,7 @@
 #define DRM_MGA_DMA_BOOTSTRAP  0x0c
 
 #define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
 #define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
 #define DRM_IOCTL_MGA_SWAP     DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
 #define DRM_IOCTL_MGA_CLEAR    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index fe917de..01a7141 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -197,4 +197,17 @@
 #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
 #define DRM_NOUVEAU_GEM_INFO           0x44
 
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
 #endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 0acaf8f..10f8b53 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -547,8 +547,8 @@
 #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
 #define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
 #define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-#define DRM_IOCTL_RADEON_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
-#define DRM_IOCTL_RADEON_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 
 typedef struct drm_radeon_init {
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h
index 8a576ef..4863cf6 100644
--- a/include/drm/savage_drm.h
+++ b/include/drm/savage_drm.h
@@ -63,10 +63,10 @@
 #define DRM_SAVAGE_BCI_EVENT_EMIT	0x02
 #define DRM_SAVAGE_BCI_EVENT_WAIT	0x03
 
-#define DRM_IOCTL_SAVAGE_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_CMDBUF		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_EVENT_EMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_EVENT_WAIT	DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+#define DRM_IOCTL_SAVAGE_BCI_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_BCI_CMDBUF		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT	DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
 
 #define SAVAGE_DMA_PCI	1
 #define SAVAGE_DMA_AGP	3
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index ca16c38..be33b3a 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -150,6 +150,7 @@
 	u16			off_cntl;
 	u32			clcd_cntl;
 	u32			cmap[16];
+	bool			clk_enabled;
 };
 
 static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index c809e28..a065612 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -50,8 +50,8 @@
 	int unsafe;		/* how unsafe this exec is (mask of LSM_UNSAFE_*) */
 	unsigned int per_clear;	/* bits to clear in current->personality */
 	int argc, envc;
-	char * filename;	/* Name of binary as seen by procps */
-	char * interp;		/* Name of the binary really executed. Most
+	const char * filename;	/* Name of binary as seen by procps */
+	const char * interp;	/* Name of the binary really executed. Most
 				   of the time same as filename, but could be
 				   different for binfmt_{misc,script} */
 	unsigned interp_flags;
@@ -126,7 +126,8 @@
 			   unsigned long stack_top,
 			   int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int copy_strings_kernel(int argc, const char *const *argv,
+			       struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 43e649a..ec94c12 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -32,7 +32,6 @@
 	BH_Delay,	/* Buffer is not yet allocated on disk */
 	BH_Boundary,	/* Block is followed by a discontiguity */
 	BH_Write_EIO,	/* I/O error on write */
-	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
 	BH_Quiet,	/* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
 
@@ -183,6 +181,8 @@
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a96b4d..76041b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -125,9 +125,6 @@
  *			block layer could (in theory) choose to ignore this
  *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
- * SWRITE		Like WRITE, but a special case for ll_rw_block() that
- *			tells it to lock the buffer first. Normally a buffer
- *			must be locked before doing IO.
  * WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
  *			the hint that someone will be waiting on this IO
  *			shortly. The device must still be unplugged explicitly,
@@ -138,9 +135,6 @@
  *			immediately after submission. The write equivalent
  *			of READ_SYNC.
  * WRITE_ODIRECT_PLUG	Special case write for O_DIRECT only.
- * SWRITE_SYNC
- * SWRITE_SYNC_PLUG	Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
- *			See SWRITE.
  * WRITE_BARRIER	Like WRITE_SYNC, but tells the block layer that all
  *			previously submitted writes must be safely on storage
  *			before this one is started. Also guarantees that when
@@ -155,7 +149,6 @@
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
-#define SWRITE			(WRITE | READA)
 
 #define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
 #define READ_META		(READ | REQ_META)
@@ -165,8 +158,6 @@
 #define WRITE_META		(WRITE | REQ_META)
 #define WRITE_BARRIER		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
 				 REQ_HARDBARRIER)
-#define SWRITE_SYNC_PLUG	(SWRITE | REQ_SYNC | REQ_NOIDLE)
-#define SWRITE_SYNC		(SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
@@ -929,6 +920,9 @@
 #define f_vfsmnt	f_path.mnt
 	const struct file_operations	*f_op;
 	spinlock_t		f_lock;  /* f_ep_links, f_flags, no IRQ */
+#ifdef CONFIG_SMP
+	int			f_sb_list_cpu;
+#endif
 	atomic_long_t		f_count;
 	unsigned int 		f_flags;
 	fmode_t			f_mode;
@@ -953,9 +947,6 @@
 	unsigned long f_mnt_write_state;
 #endif
 };
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);
 
 #define get_file(x)	atomic_long_inc(&(x)->f_count)
 #define fput_atomic(x)	atomic_long_add_unless(&(x)->f_count, -1, 1)
@@ -1346,7 +1337,11 @@
 
 	struct list_head	s_inodes;	/* all inodes */
 	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+#ifdef CONFIG_SMP
+	struct list_head __percpu *s_files;
+#else
 	struct list_head	s_files;
+#endif
 	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
@@ -2197,8 +2192,6 @@
 	__insert_inode_hash(inode, inode->i_ino);
 }
 
-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
 #ifdef CONFIG_BLOCK
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index eca3d52..a42b5bf 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -5,7 +5,7 @@
 
 struct fs_struct {
 	int users;
-	rwlock_t lock;
+	spinlock_t lock;
 	int umask;
 	int in_exec;
 	struct path root, pwd;
@@ -23,29 +23,29 @@
 
 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }
 
 static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
 				       struct path *pwd)
 {
-	read_lock(&fs->lock);
+	spin_lock(&fs->lock);
 	*root = fs->root;
 	path_get(root);
 	*pwd = fs->pwd;
 	path_get(pwd);
-	read_unlock(&fs->lock);
+	spin_unlock(&fs->lock);
 }
 
 #endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 311f875..4aa95f2 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -836,6 +836,8 @@
 
 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
 
+extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
+
 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
 	void *buf, unsigned int len, size_t recsize);
 
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
new file mode 100644
index 0000000..b288cb7
--- /dev/null
+++ b/include/linux/lglock.h
@@ -0,0 +1,172 @@
+/*
+ * Specialised local-global spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * "local/global locks" (lglocks) can be used to:
+ *
+ * - Provide fast exclusive access to per-CPU data, with exclusive access to
+ *   another CPU's data allowed but possibly subject to contention, and to
+ *   provide very slow exclusive access to all per-CPU data.
+ * - Or to provide very fast and scalable read serialisation, and to provide
+ *   very slow exclusive serialisation of data (not necessarily per-CPU data).
+ *
+ * Brlocks are also implemented as a short-hand notation for the latter use
+ * case.
+ *
+ * Copyright 2009, 2010, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_LGLOCK_H
+#define __LINUX_LGLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/lockdep.h>
+#include <linux/percpu.h>
+
+/* can make br locks by using local lock for read side, global lock for write */
+#define br_lock_init(name)	name##_lock_init()
+#define br_read_lock(name)	name##_local_lock()
+#define br_read_unlock(name)	name##_local_unlock()
+#define br_write_lock(name)	name##_global_lock_online()
+#define br_write_unlock(name)	name##_global_unlock_online()
+
+#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
+#define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
+
+
+#define lg_lock_init(name)	name##_lock_init()
+#define lg_local_lock(name)	name##_local_lock()
+#define lg_local_unlock(name)	name##_local_unlock()
+#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
+#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
+#define lg_global_lock(name)	name##_global_lock()
+#define lg_global_unlock(name)	name##_global_unlock()
+#define lg_global_lock_online(name) name##_global_lock_online()
+#define lg_global_unlock_online(name) name##_global_unlock_online()
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define LOCKDEP_INIT_MAP lockdep_init_map
+
+#define DEFINE_LGLOCK_LOCKDEP(name)					\
+ struct lock_class_key name##_lock_key;					\
+ struct lockdep_map name##_lock_dep_map;				\
+ EXPORT_SYMBOL(name##_lock_dep_map)
+
+#else
+#define LOCKDEP_INIT_MAP(a, b, c, d)
+
+#define DEFINE_LGLOCK_LOCKDEP(name)
+#endif
+
+
+#define DECLARE_LGLOCK(name)						\
+ extern void name##_lock_init(void);					\
+ extern void name##_local_lock(void);					\
+ extern void name##_local_unlock(void);					\
+ extern void name##_local_lock_cpu(int cpu);				\
+ extern void name##_local_unlock_cpu(int cpu);				\
+ extern void name##_global_lock(void);					\
+ extern void name##_global_unlock(void);				\
+ extern void name##_global_lock_online(void);				\
+ extern void name##_global_unlock_online(void);				\
+
+#define DEFINE_LGLOCK(name)						\
+									\
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
+ DEFINE_LGLOCK_LOCKDEP(name);						\
+									\
+ void name##_lock_init(void) {						\
+	int i;								\
+	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+	for_each_possible_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_lock_init);					\
+									\
+ void name##_local_lock(void) {						\
+	arch_spinlock_t *lock;						\
+	preempt_disable();						\
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
+	lock = &__get_cpu_var(name##_lock);				\
+	arch_spin_lock(lock);						\
+ }									\
+ EXPORT_SYMBOL(name##_local_lock);					\
+									\
+ void name##_local_unlock(void) {					\
+	arch_spinlock_t *lock;						\
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
+	lock = &__get_cpu_var(name##_lock);				\
+	arch_spin_unlock(lock);						\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_local_unlock);					\
+									\
+ void name##_local_lock_cpu(int cpu) {					\
+	arch_spinlock_t *lock;						\
+	preempt_disable();						\
+	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
+	lock = &per_cpu(name##_lock, cpu);				\
+	arch_spin_lock(lock);						\
+ }									\
+ EXPORT_SYMBOL(name##_local_lock_cpu);					\
+									\
+ void name##_local_unlock_cpu(int cpu) {				\
+	arch_spinlock_t *lock;						\
+	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
+	lock = &per_cpu(name##_lock, cpu);				\
+	arch_spin_unlock(lock);						\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_local_unlock_cpu);				\
+									\
+ void name##_global_lock_online(void) {					\
+	int i;								\
+	preempt_disable();						\
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_lock(lock);					\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_global_lock_online);				\
+									\
+ void name##_global_unlock_online(void) {				\
+	int i;								\
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_unlock(lock);					\
+	}								\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_global_unlock_online);				\
+									\
+ void name##_global_lock(void) {					\
+	int i;								\
+	preempt_disable();						\
+	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_lock(lock);					\
+	}								\
+ }									\
+ EXPORT_SYMBOL(name##_global_lock);					\
+									\
+ void name##_global_unlock(void) {					\
+	int i;								\
+	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
+	for_each_online_cpu(i) {					\
+		arch_spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		arch_spin_unlock(lock);					\
+	}								\
+	preempt_enable();						\
+ }									\
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b8bb9a6..ee7e258 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -134,7 +134,7 @@
 					   within vm_mm. */
 
 	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
+	struct vm_area_struct *vm_next, *vm_prev;
 
 	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
 	unsigned long vm_flags;		/* Flags, see mm.h. */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce160d6..1e2a6db 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2109,7 +2109,9 @@
 extern int allow_signal(int);
 extern int disallow_signal(int);
 
-extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
+extern int do_execve(const char *,
+		     const char __user * const __user *,
+		     const char __user * const __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6d14409..9f63538 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -68,7 +68,7 @@
  * Slab cache management.
  */
 struct kmem_cache {
-	struct kmem_cache_cpu *cpu_slab;
+	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index ae0a528..92e52a1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -213,6 +213,9 @@
  * @dma_alignment: SPI controller constraint on DMA buffers alignment.
  * @mode_bits: flags understood by this controller driver
  * @flags: other constraints relevant to this driver
+ * @bus_lock_spinlock: spinlock for SPI bus locking
+ * @bus_lock_mutex: mutex for SPI bus locking
+ * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
  * @setup: updates the device mode and clocking records used by a
  *	device's SPI controller; protocol code may call this.  This
  *	must fail if an unrecognized or unsupported mode is requested.
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 6e5d197..e6319d1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -820,7 +820,7 @@
 				  u64 mask, int fd,
 				  const char  __user *pathname);
 
-int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
+int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]);
 
 
 asmlinkage long sys_perf_event_open(
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1437da3..67d64e6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -329,6 +329,13 @@
 	struct tty_port *port;
 };
 
+/* Each of a tty's open files has private_data pointing to tty_file_private */
+struct tty_file_private {
+	struct tty_struct *tty;
+	struct file *file;
+	struct list_head list;
+};
+
 /* tty magic number */
 #define TTY_MAGIC		0x5401
 
@@ -458,6 +465,7 @@
 extern struct tty_struct *get_current_tty(void);
 extern void tty_default_fops(struct file_operations *fops);
 extern struct tty_struct *alloc_tty_struct(void);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
 extern void free_tty_struct(struct tty_struct *tty);
 extern void initialize_tty_struct(struct tty_struct *tty,
 		struct tty_driver *driver, int idx);
@@ -470,6 +478,7 @@
 extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
 
 extern struct mutex tty_mutex;
+extern spinlock_t tty_files_lock;
 
 extern void tty_write_unlock(struct tty_struct *tty);
 extern int tty_write_lock(struct tty_struct *tty, int ndelay);
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 6a664c3..7dc97d1 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1707,6 +1707,7 @@
 	unsigned int card_type;			/* EMU10K1_CARD_* */
 	unsigned int ecard_ctrl;		/* ecard control bits */
 	unsigned long dma_mask;			/* PCI DMA mask */
+	unsigned int delay_pcm_irq;		/* in samples */
 	int max_cache_pages;			/* max memory size / PAGE_SIZE */
 	struct snd_dma_buffer silent_page;	/* silent page */
 	struct snd_dma_buffer ptb_pages;	/* page table pages */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 0000000..49682d7
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work:	pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+	),
+
+	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work:	pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_end,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+	),
+
+	TP_printk("work struct %p", __entry->work)
+);
+
+
+#endif /*  _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 2b10853..3098a38 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -24,10 +24,11 @@
 
 __setup("noinitrd", no_initrd);
 
-static int __init do_linuxrc(void * shell)
+static int __init do_linuxrc(void *_shell)
 {
-	static char *argv[] = { "linuxrc", NULL, };
-	extern char * envp_init[];
+	static const char *argv[] = { "linuxrc", NULL, };
+	extern const char *envp_init[];
+	const char *shell = _shell;
 
 	sys_close(old_fd);sys_close(root_fd);
 	sys_setsid();
diff --git a/init/main.c b/init/main.c
index 22d61cb..94ab488 100644
--- a/init/main.c
+++ b/init/main.c
@@ -197,8 +197,8 @@
 
 __setup("reset_devices", set_reset_devices);
 
-static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
-char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
 static const char *panic_later, *panic_param;
 
 extern const struct obs_kernel_param __setup_start[], __setup_end[];
@@ -809,7 +809,7 @@
 		do_one_initcall(*fn);
 }
 
-static void run_init_process(char *init_filename)
+static void run_init_process(const char *init_filename)
 {
 	argv_init[0] = init_filename;
 	kernel_execve(init_filename, argv_init, envp_init);
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index c438f54..be775f7 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -255,7 +255,14 @@
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
+#ifdef CONFIG_KALLSYMS
 extern const char *kdb_walk_kallsyms(loff_t *pos);
+#else /* ! CONFIG_KALLSYMS */
+static inline const char *kdb_walk_kallsyms(loff_t *pos)
+{
+	return NULL;
+}
+#endif /* ! CONFIG_KALLSYMS */
 extern char *kdb_getstr(char *, size_t, char *);
 
 /* Defines for kdb_symbol_print */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 45344d5c..6b2485d 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -82,8 +82,8 @@
 int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 {
 	int ret = 0;
-	unsigned long symbolsize;
-	unsigned long offset;
+	unsigned long symbolsize = 0;
+	unsigned long offset = 0;
 #define knt1_size 128		/* must be >= kallsyms table size */
 	char *knt1 = NULL;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 671ed56..0312022 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1386,8 +1386,7 @@
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		*p_code = 0;
 
-	/* don't need the RCU readlock here as we're holding a spinlock */
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 unlock_sig:
 	spin_unlock_irq(&p->sighand->siglock);
 	if (!exit_code)
@@ -1460,7 +1459,7 @@
 	}
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 	spin_unlock_irq(&p->sighand->siglock);
 
 	pid = task_pid_vnr(p);
diff --git a/kernel/fork.c b/kernel/fork.c
index 98b4508..b7e9d60 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
@@ -328,6 +328,7 @@
 	if (retval)
 		goto out;
 
+	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;
 
@@ -359,7 +360,7 @@
 			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
-		tmp->vm_next = NULL;
+		tmp->vm_next = tmp->vm_prev = NULL;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@
 		 */
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
+		tmp->vm_prev = prev;
+		prev = tmp;
 
 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
 		rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@
 	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
 		/* tsk->fs is already what we want */
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		if (fs->in_exec) {
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 			return -EAGAIN;
 		}
 		fs->users++;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 		return 0;
 	}
 	tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@
 
 		if (new_fs) {
 			fs = current->fs;
-			write_lock(&fs->lock);
+			spin_lock(&fs->lock);
 			current->fs = new_fs;
 			if (--fs->users)
 				new_fs = NULL;
 			else
 				new_fs = fs;
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 		}
 
 		if (new_mm) {
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604..6b5580c 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@
 }
 EXPORT_SYMBOL(__kfifo_out_r);
 
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int n;
+
+	n = __kfifo_peek_n(fifo, recsize);
+	fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
 	unsigned long len, unsigned int *copied, size_t recsize)
 {
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6e9b196..9cd0591 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -153,7 +153,9 @@
 			goto fail;
 	}
 
-	retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+	retval = kernel_execve(sub_info->path,
+			       (const char *const *)sub_info->argv,
+			       (const char *const *)sub_info->envp);
 
 	/* Exec failed? */
 fail:
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3632ce8..19cccc3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3846,6 +3846,9 @@
 			rpos = reader->read;
 			pos += size;
 
+			if (rpos >= commit)
+				break;
+
 			event = rb_reader_event(cpu_buffer);
 			size = rb_event_length(event);
 		} while (len > size);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba14a22..9ec59f5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3463,6 +3463,7 @@
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
+	size_t written;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3484,11 +3485,15 @@
 	} else
 		buf[cnt] = '\0';
 
-	cnt = mark_printk("%s", buf);
+	written = mark_printk("%s", buf);
 	kfree(buf);
-	*fpos += cnt;
+	*fpos += written;
 
-	return cnt;
+	/* don't tell userspace we wrote more - it might confuse them */
+	if (written > cnt)
+		written = cnt;
+
+	return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 09b4fa6..4c758f1 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -598,88 +598,165 @@
 	return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+	FORMAT_HEADER		= 1,
+	FORMAT_PRINTFMT		= 2,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	struct ftrace_event_call *call = m->private;
 	struct ftrace_event_field *field;
+	struct list_head *head;
 
-	list_for_each_entry_reverse(field, head, link) {
-		/*
-		 * Smartly shows the array type(except dynamic array).
-		 * Normal:
-		 *	field:TYPE VAR
-		 * If TYPE := TYPE[LEN], it is shown:
-		 *	field:TYPE VAR[LEN]
-		 */
-		const char *array_descriptor = strchr(field->type, '[');
+	(*pos)++;
 
-		if (!strncmp(field->type, "__data_loc", 10))
-			array_descriptor = NULL;
+	switch ((unsigned long)v) {
+	case FORMAT_HEADER:
+		head = &ftrace_common_fields;
 
-		if (!array_descriptor) {
-			trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					field->type, field->name, field->offset,
-					field->size, !!field->is_signed);
-		} else {
-			trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-					"\tsize:%u;\tsigned:%d;\n",
-					(int)(array_descriptor - field->type),
-					field->type, field->name,
-					array_descriptor, field->offset,
-					field->size, !!field->is_signed);
-		}
+		if (unlikely(list_empty(head)))
+			return NULL;
+
+		field = list_entry(head->prev, struct ftrace_event_field, link);
+		return field;
+
+	case FORMAT_PRINTFMT:
+		/* all done */
+		return NULL;
 	}
+
+	head = trace_get_fields(call);
+
+	/*
+	 * To separate common fields from event fields, the
+	 * LSB is set on the first event field. Clear it in case.
+	 */
+	v = (void *)((unsigned long)v & ~1L);
+
+	field = v;
+	/*
+	 * If this is a common field, and at the end of the list, then
+	 * continue with main list.
+	 */
+	if (field->link.prev == &ftrace_common_fields) {
+		if (unlikely(list_empty(head)))
+			return NULL;
+		field = list_entry(head->prev, struct ftrace_event_field, link);
+		/* Set the LSB to notify f_show to print an extra newline */
+		field = (struct ftrace_event_field *)
+			((unsigned long)field | 1);
+		return field;
+	}
+
+	/* If we are done tell f_show to print the format */
+	if (field->link.prev == head)
+		return (void *)FORMAT_PRINTFMT;
+
+	field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+	return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-		  loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = filp->private_data;
-	struct list_head *head;
-	struct trace_seq *s;
-	char *buf;
-	int r;
+	loff_t l = 0;
+	void *p;
 
-	if (*ppos)
+	/* Start by showing the header */
+	if (!*pos)
+		return (void *)FORMAT_HEADER;
+
+	p = (void *)FORMAT_HEADER;
+	do {
+		p = f_next(m, p, &l);
+	} while (p && l < *pos);
+
+	return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_field *field;
+	const char *array_descriptor;
+
+	switch ((unsigned long)v) {
+	case FORMAT_HEADER:
+		seq_printf(m, "name: %s\n", call->name);
+		seq_printf(m, "ID: %d\n", call->event.type);
+		seq_printf(m, "format:\n");
 		return 0;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
-	if (!s)
-		return -ENOMEM;
-
-	trace_seq_init(s);
-
-	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->event.type);
-	trace_seq_printf(s, "format:\n");
-
-	/* print common fields */
-	print_event_fields(s, &ftrace_common_fields);
-
-	trace_seq_putc(s, '\n');
-
-	/* print event specific fields */
-	head = trace_get_fields(call);
-	print_event_fields(s, head);
-
-	r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
-
-	if (!r) {
-		/*
-		 * ug!  The format output is bigger than a PAGE!!
-		 */
-		buf = "FORMAT TOO BIG\n";
-		r = simple_read_from_buffer(ubuf, cnt, ppos,
-					      buf, strlen(buf));
-		goto out;
+	case FORMAT_PRINTFMT:
+		seq_printf(m, "\nprint fmt: %s\n",
+			   call->print_fmt);
+		return 0;
 	}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos,
-				    s->buffer, s->len);
- out:
-	kfree(s);
-	return r;
+	/*
+	 * To separate common fields from event fields, the
+	 * LSB is set on the first event field. Clear it and
+	 * print a newline if it is set.
+	 */
+	if ((unsigned long)v & 1) {
+		seq_putc(m, '\n');
+		v = (void *)((unsigned long)v & ~1L);
+	}
+
+	field = v;
+
+	/*
+	 * Smartly shows the array type(except dynamic array).
+	 * Normal:
+	 *	field:TYPE VAR
+	 * If TYPE := TYPE[LEN], it is shown:
+	 *	field:TYPE VAR[LEN]
+	 */
+	array_descriptor = strchr(field->type, '[');
+
+	if (!strncmp(field->type, "__data_loc", 10))
+		array_descriptor = NULL;
+
+	if (!array_descriptor)
+		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+			   field->type, field->name, field->offset,
+			   field->size, !!field->is_signed);
+	else
+		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+			   (int)(array_descriptor - field->type),
+			   field->type, field->name,
+			   array_descriptor, field->offset,
+			   field->size, !!field->is_signed);
+
+	return 0;
+}
+
+static void f_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations trace_format_seq_ops = {
+	.start		= f_start,
+	.next		= f_next,
+	.stop		= f_stop,
+	.show		= f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_event_call *call = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
+	ret = seq_open(file, &trace_format_seq_ops);
+	if (ret < 0)
+		return ret;
+
+	m = file->private_data;
+	m->private = call;
+
+	return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-	.open = tracing_open_generic,
-	.read = event_format_read,
+	.open = trace_format_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6bff236..6f23369 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -507,7 +507,15 @@
 			 * if the output fails.
 			 */
 			data->ent = *curr;
-			data->ret = *next;
+			/*
+			 * If the next event is not a return type, then
+			 * we only care about what type it is. Otherwise we can
+			 * safely copy the entire event.
+			 */
+			if (next->ent.type == TRACE_GRAPH_RET)
+				data->ret = *next;
+			else
+				data->ret.ent.type = next->ent.type;
 		}
 	}
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e..8bd600c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -1790,7 +1793,13 @@
 	work_clear_pending(work);
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	trace_workqueue_execute_start(work);
 	f(work);
+	/*
+	 * While we must be careful to not use "work" after this, the trace
+	 * point will only record its address.
+	 */
+	trace_workqueue_execute_end(work);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9e06b7f..1b4afd2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -994,13 +994,16 @@
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
+	depends on HAVE_LATENCYTOP_SUPPORT
+	depends on DEBUG_KERNEL
+	depends on STACKTRACE_SUPPORT
+	depends on PROC_FS
 	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
 	select SCHEDSTATS
 	select SCHED_DEBUG
-	depends on HAVE_LATENCYTOP_SUPPORT
 	help
 	  Enable this option if you want to use the LatencyTOP tool
 	  to find out which userspace is blocking on what kernel operations.
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e907858..efd16fa 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -174,14 +174,16 @@
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
+	int i;
 
 	/*
 	 * must only free zeroed nodes into the slab. radix_tree_shrink
 	 * can leave us with a non-NULL entry in the first slot, so clear
 	 * that here to make sure.
 	 */
-	tag_clear(node, 0, 0);
-	tag_clear(node, 1, 0);
+	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+		tag_clear(node, i, 0);
+
 	node->slots[0] = NULL;
 	node->count = 0;
 
@@ -623,17 +625,30 @@
  * also settag. The function stops either after tagging nr_to_tag items or
  * after reaching last_index.
  *
+ * The tags must be set from the leaf level only and propagated back up the
+ * path to the root. We must do this so that we resolve the full path before
+ * setting any tags on intermediate nodes. If we set tags as we descend, then
+ * we can get to the leaf node and find that the index that has the iftag
+ * set is outside the range we are scanning. This results in dangling tags and
+ * can lead to problems with later tag operations (e.g. livelocks on lookups).
+ *
  * The function returns number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
+ * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
+ * be prepared to handle that.
  */
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 		unsigned long *first_indexp, unsigned long last_index,
 		unsigned long nr_to_tag,
 		unsigned int iftag, unsigned int settag)
 {
-	unsigned int height = root->height, shift;
-	unsigned long tagged = 0, index = *first_indexp;
-	struct radix_tree_node *open_slots[height], *slot;
+	unsigned int height = root->height;
+	struct radix_tree_path path[height];
+	struct radix_tree_path *pathp = path;
+	struct radix_tree_node *slot;
+	unsigned int shift;
+	unsigned long tagged = 0;
+	unsigned long index = *first_indexp;
 
 	last_index = min(last_index, radix_tree_maxindex(height));
 	if (index > last_index)
@@ -653,6 +668,13 @@
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	slot = radix_tree_indirect_to_ptr(root->rnode);
 
+	/*
+	 * we fill the path from (root->height - 2) to 0, leaving the index at
+	 * (root->height - 1) as a terminator. Zero the node in the terminator
+	 * so that we can use this to end walk loops back up the path.
+	 */
+	path[height - 1].node = NULL;
+
 	for (;;) {
 		int offset;
 
@@ -661,21 +683,35 @@
 			goto next;
 		if (!tag_get(slot, iftag, offset))
 			goto next;
-		tag_set(slot, settag, offset);
-		if (height == 1) {
-			tagged++;
-			goto next;
+		if (height > 1) {
+			/* Go down one level */
+			height--;
+			shift -= RADIX_TREE_MAP_SHIFT;
+			path[height - 1].node = slot;
+			path[height - 1].offset = offset;
+			slot = slot->slots[offset];
+			continue;
 		}
-		/* Go down one level */
-		height--;
-		shift -= RADIX_TREE_MAP_SHIFT;
-		open_slots[height] = slot;
-		slot = slot->slots[offset];
-		continue;
+
+		/* tag the leaf */
+		tagged++;
+		tag_set(slot, settag, offset);
+
+		/* walk back up the path tagging interior nodes */
+		pathp = &path[0];
+		while (pathp->node) {
+			/* stop if we find a node with the tag already set */
+			if (tag_get(pathp->node, settag, pathp->offset))
+				break;
+			tag_set(pathp->node, settag, pathp->offset);
+			pathp++;
+		}
+
 next:
 		/* Go to next item at level determined by 'shift' */
 		index = ((index >> shift) + 1) << shift;
-		if (index > last_index)
+		/* Overflow can happen when last_index is ~0UL... */
+		if (index > last_index || !index)
 			break;
 		if (tagged >= nr_to_tag)
 			break;
@@ -685,7 +721,7 @@
 			 * last_index is guaranteed to be in the tree, what
 			 * we do below cannot wander astray.
 			 */
-			slot = open_slots[height];
+			slot = path[height - 1].node;
 			height++;
 			shift += RADIX_TREE_MAP_SHIFT;
 		}
diff --git a/mm/memory.c b/mm/memory.c
index b6e5fd2..2ed2267 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2770,11 +2770,18 @@
 {
 	address &= PAGE_MASK;
 	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
+		struct vm_area_struct *prev = vma->vm_prev;
 
-		expand_stack(vma, address);
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+		expand_stack(vma, address - PAGE_SIZE);
 	}
 	return 0;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 49e5e4c..cbae7c5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,6 +135,19 @@
 	}
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -168,11 +181,9 @@
 		gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 3100333..331e51a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
 	if (prev) {
-		vma->vm_next = prev->vm_next;
+		next = prev->vm_next;
 		prev->vm_next = vma;
 	} else {
 		mm->mmap = vma;
 		if (rb_parent)
-			vma->vm_next = rb_entry(rb_parent,
+			next = rb_entry(rb_parent,
 					struct vm_area_struct, vm_rb);
 		else
-			vma->vm_next = NULL;
+			next = NULL;
 	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev)
 {
-	prev->vm_next = vma->vm_next;
+	struct vm_area_struct *next = vma->vm_next;
+
+	prev->vm_next = next;
+	if (next)
+		next->vm_prev = prev;
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@
 	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+	vma->vm_prev = NULL;
 	do {
 		rb_erase(&vma->vm_rb, &mm->mm_rb);
 		mm->map_count--;
@@ -1922,6 +1933,8 @@
 		vma = vma->vm_next;
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
+	if (vma)
+		vma->vm_prev = prev;
 	tail_vma->vm_next = NULL;
 	if (mm->unmap_area == arch_unmap_area)
 		addr = prev ? prev->vm_end : mm->mmap_base;
diff --git a/mm/nommu.c b/mm/nommu.c
index efa9a38..88ff091 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -604,7 +604,7 @@
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp;
+	struct vm_area_struct *pvma, **pp, *next;
 	struct address_space *mapping;
 	struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@
 			break;
 	}
 
-	vma->vm_next = *pp;
+	next = *pp;
 	*pp = vma;
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 /*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5014e50..fc81cb2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -372,7 +372,7 @@
 		}
 
 		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
-			task->pid, __task_cred(task)->uid, task->tgid,
+			task->pid, task_uid(task), task->tgid,
 			task->mm->total_vm, get_mm_rss(task->mm),
 			task_cpu(task), task->signal->oom_adj,
 			task->signal->oom_score_adj, task->comm);
@@ -401,10 +401,9 @@
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
 	p = find_lock_task_mm(p);
-	if (!p) {
-		task_unlock(p);
+	if (!p)
 		return 1;
-	}
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(p), p->comm, K(p->mm->total_vm),
 		K(get_mm_counter(p->mm, MM_ANONPAGES)),
@@ -647,6 +646,7 @@
 	unsigned long freed = 0;
 	unsigned int points;
 	enum oom_constraint constraint = CONSTRAINT_NONE;
+	int killed = 0;
 
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
@@ -684,7 +684,7 @@
 		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
 				NULL, nodemask,
 				"Out of memory (oom_kill_allocating_task)"))
-			return;
+			goto out;
 	}
 
 retry:
@@ -692,7 +692,7 @@
 			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
 								 NULL);
 	if (PTR_ERR(p) == -1UL)
-		return;
+		goto out;
 
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
@@ -704,13 +704,15 @@
 	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
 				nodemask, "Out of memory"))
 		goto retry;
+	killed = 1;
+out:
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Give "p" a good chance of killing itself before we
 	 * retry to allocate memory unless "p" is current
 	 */
-	if (!test_thread_flag(TIF_MEMDIE))
+	if (killed && !test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aac..c09ef52 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -836,7 +836,8 @@
 		spin_unlock_irq(&mapping->tree_lock);
 		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
 		cond_resched();
-	} while (tagged >= WRITEBACK_TAG_BATCH);
+		/* We check 'start' to handle wrapping when end == ~0UL */
+	} while (tagged >= WRITEBACK_TAG_BATCH && start);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index dfaa0f4..080b09a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2325,7 +2325,10 @@
 
 static void shmem_put_super(struct super_block *sb)
 {
-	kfree(sb->s_fs_info);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+
+	percpu_counter_destroy(&sbinfo->used_blocks);
+	kfree(sbinfo);
 	sb->s_fs_info = NULL;
 }
 
@@ -2367,7 +2370,8 @@
 #endif
 
 	spin_lock_init(&sbinfo->stat_lock);
-	percpu_counter_init(&sbinfo->used_blocks, 0);
+	if (percpu_counter_init(&sbinfo->used_blocks, 0))
+		goto failed;
 	sbinfo->free_inodes = sbinfo->max_inodes;
 
 	sb->s_maxbytes = SHMEM_MAX_BYTES;
diff --git a/mm/slab.c b/mm/slab.c
index 88435fc..fcae981 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2330,8 +2330,8 @@
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
 		size = PAGE_SIZE;
 	}
 #endif
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ae6543..3721fbb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3143,7 +3143,7 @@
 			put_page(skb_shinfo(skb)->frags[0].page);
 			memmove(skb_shinfo(skb)->frags,
 				skb_shinfo(skb)->frags + 1,
-				--skb_shinfo(skb)->nr_frags);
+				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 		}
 	}
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6bccba3..51d6c31 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -735,6 +735,7 @@
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -742,6 +743,7 @@
 			++i;
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c439721..97b64b2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -909,6 +909,7 @@
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -916,6 +917,7 @@
 			++i; /* macro does multi eval of i */
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5359ef4..29a7bca 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -922,6 +922,7 @@
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -929,6 +930,7 @@
 			++i;
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8f2d040..d126365 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2580,7 +2580,7 @@
 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
-		.proc_handler	=	proc_dointvec_jiffies,
+		.proc_handler	=	proc_dointvec,
 	},
 	{
 		.procname	=	"mtu_expires",
@@ -2594,7 +2594,7 @@
 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
-		.proc_handler	=	proc_dointvec_jiffies,
+		.proc_handler	=	proc_dointvec,
 	},
 	{
 		.procname	=	"gc_min_interval_ms",
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2cbf380..8648a99 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1406,7 +1406,7 @@
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int noblock = flags&MSG_DONTWAIT;
 	size_t copied;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *frag __maybe_unused = NULL;
 	int err;
 
 	if (flags&MSG_OOB)
@@ -1441,21 +1441,7 @@
 			kfree_skb(skb);
 			skb = compskb;
 		} else {
-			/*
-			 * Before setting frag_list to NULL, we must get a
-			 * private copy of skb if shared (because of MSG_PEEK)
-			 */
-			if (skb_shared(skb)) {
-				struct sk_buff *nskb;
-
-				nskb = pskb_copy(skb, GFP_KERNEL);
-				kfree_skb(skb);
-				skb = nskb;
-				err = -ENOMEM;
-				if (!skb)
-					goto out;
-			}
-			kfree_skb(skb_shinfo(skb)->frag_list);
+			frag = skb_shinfo(skb)->frag_list;
 			skb_shinfo(skb)->frag_list = NULL;
 		}
 	}
@@ -1492,6 +1478,10 @@
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
 
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+	skb_shinfo(skb)->frag_list = frag;
+#endif
+
 	skb_free_datagram(sk, skb);
 
 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 8406c665..c2ed90a 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -152,21 +152,24 @@
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
-	struct tc_gact opt;
 	struct tcf_gact *gact = a->priv;
+	struct tc_gact opt = {
+		.index   = gact->tcf_index,
+		.refcnt  = gact->tcf_refcnt - ref,
+		.bindcnt = gact->tcf_bindcnt - bind,
+		.action  = gact->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = gact->tcf_index;
-	opt.refcnt = gact->tcf_refcnt - ref;
-	opt.bindcnt = gact->tcf_bindcnt - bind;
-	opt.action = gact->tcf_action;
 	NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
 #ifdef CONFIG_GACT_PROB
 	if (gact->tcfg_ptype) {
-		struct tc_gact_p p_opt;
-		p_opt.paction = gact->tcfg_paction;
-		p_opt.pval = gact->tcfg_pval;
-		p_opt.ptype = gact->tcfg_ptype;
+		struct tc_gact_p p_opt = {
+			.paction = gact->tcfg_paction,
+			.pval    = gact->tcfg_pval,
+			.ptype   = gact->tcfg_ptype,
+		};
+
 		NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
 	}
 #endif
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 11f195a..0c311be 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -219,15 +219,16 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_mirred *m = a->priv;
-	struct tc_mirred opt;
+	struct tc_mirred opt = {
+		.index   = m->tcf_index,
+		.action  = m->tcf_action,
+		.refcnt  = m->tcf_refcnt - ref,
+		.bindcnt = m->tcf_bindcnt - bind,
+		.eaction = m->tcfm_eaction,
+		.ifindex = m->tcfm_ifindex,
+	};
 	struct tcf_t t;
 
-	opt.index = m->tcf_index;
-	opt.action = m->tcf_action;
-	opt.refcnt = m->tcf_refcnt - ref;
-	opt.bindcnt = m->tcf_bindcnt - bind;
-	opt.eaction = m->tcfm_eaction;
-	opt.ifindex = m->tcfm_ifindex;
 	NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
 	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 509a2d5..186eb83 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -272,19 +272,19 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_nat *p = a->priv;
-	struct tc_nat opt;
+	struct tc_nat opt = {
+		.old_addr = p->old_addr,
+		.new_addr = p->new_addr,
+		.mask     = p->mask,
+		.flags    = p->flags,
+
+		.index    = p->tcf_index,
+		.action   = p->tcf_action,
+		.refcnt   = p->tcf_refcnt - ref,
+		.bindcnt  = p->tcf_bindcnt - bind,
+	};
 	struct tcf_t t;
 
-	opt.old_addr = p->old_addr;
-	opt.new_addr = p->new_addr;
-	opt.mask = p->mask;
-	opt.flags = p->flags;
-
-	opt.index = p->tcf_index;
-	opt.action = p->tcf_action;
-	opt.refcnt = p->tcf_refcnt - ref;
-	opt.bindcnt = p->tcf_bindcnt - bind;
-
 	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 4a1d640..97e84f3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -164,13 +164,14 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_defact *d = a->priv;
-	struct tc_defact opt;
+	struct tc_defact opt = {
+		.index   = d->tcf_index,
+		.refcnt  = d->tcf_refcnt - ref,
+		.bindcnt = d->tcf_bindcnt - bind,
+		.action  = d->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = d->tcf_index;
-	opt.refcnt = d->tcf_refcnt - ref;
-	opt.bindcnt = d->tcf_bindcnt - bind;
-	opt.action = d->tcf_action;
 	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
 	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e9607fe..66cbf4e 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -159,13 +159,14 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = a->priv;
-	struct tc_skbedit opt;
+	struct tc_skbedit opt = {
+		.index   = d->tcf_index,
+		.refcnt  = d->tcf_refcnt - ref,
+		.bindcnt = d->tcf_bindcnt - bind,
+		.action  = d->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = d->tcf_index;
-	opt.refcnt = d->tcf_refcnt - ref;
-	opt.bindcnt = d->tcf_bindcnt - bind;
-	opt.action = d->tcf_action;
 	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 443c161..3376d76 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,10 +18,11 @@
 	  If unsure, say N.
 
 config RPCSEC_GSS_KRB5
-	tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
-	depends on SUNRPC && EXPERIMENTAL
+	tristate
+	depends on SUNRPC && CRYPTO
+	prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4)
+	default y
 	select SUNRPC_GSS
-	select CRYPTO
 	select CRYPTO_MD5
 	select CRYPTO_DES
 	select CRYPTO_CBC
@@ -34,7 +35,7 @@
 	  available from http://linux-nfs.org/.  In addition, user-space
 	  Kerberos support should be installed.
 
-	  If unsure, say N.
+	  If unsure, say Y.
 
 config RPCSEC_GSS_SPKM3
 	tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e5e28d1..2ac3f6e 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -249,6 +249,8 @@
 	req->rl_nchunks = nchunks;
 
 	BUG_ON(nchunks == 0);
+	BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+	       && (nchunks > 3));
 
 	/*
 	 * finish off header. If write, marshal discrim and nchunks.
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 27015c6..5f4c7b3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -650,10 +650,22 @@
 	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
-		/* Add room for frmr register and invalidate WRs */
-		ep->rep_attr.cap.max_send_wr *= 3;
-		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
-			return -EINVAL;
+		/* Add room for frmr register and invalidate WRs.
+		 * 1. FRMR reg WR for head
+		 * 2. FRMR invalidate WR for head
+		 * 3. FRMR reg WR for pagelist
+		 * 4. FRMR invalidate WR for pagelist
+		 * 5. FRMR reg WR for tail
+		 * 6. FRMR invalidate WR for tail
+		 * 7. The RDMA_SEND WR
+		 */
+		ep->rep_attr.cap.max_send_wr *= 7;
+		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
+			cdata->max_requests = devattr.max_qp_wr / 7;
+			if (!cdata->max_requests)
+				return -EINVAL;
+			ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7;
+		}
 		break;
 	case RPCRDMA_MEMWINDOWS_ASYNC:
 	case RPCRDMA_MEMWINDOWS:
@@ -1490,7 +1502,7 @@
 	memset(&frmr_wr, 0, sizeof frmr_wr);
 	frmr_wr.opcode = IB_WR_FAST_REG_MR;
 	frmr_wr.send_flags = 0;			/* unsignaled */
-	frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma;
+	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
 	frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
 	frmr_wr.wr.fast_reg.page_list_len = i;
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 49a62f0..b6309db 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1305,10 +1305,11 @@
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
-	dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
+	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
 			sk->sk_state, xprt_connected(xprt),
 			sock_flag(sk, SOCK_DEAD),
-			sock_flag(sk, SOCK_ZAPPED));
+			sock_flag(sk, SOCK_ZAPPED),
+			sk->sk_shutdown);
 
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
@@ -1779,10 +1780,25 @@
 {
 	unsigned int state = transport->inet->sk_state;
 
-	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
-		return;
-	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
-		return;
+	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC:       %s: TCP_CLOSEd and sk_shutdown set to %d\n",
+				__func__, transport->inet->sk_shutdown);
+	}
+	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
+		/* we don't need to abort the connection if the socket
+		 * hasn't undergone a shutdown
+		 */
+		if (transport->inet->sk_shutdown == 0)
+			return;
+		dprintk("RPC:       %s: ESTABLISHED/SYN_SENT "
+				"sk_shutdown set to %d\n",
+				__func__, transport->inet->sk_shutdown);
+	}
 	xs_abort_connection(xprt, transport);
 }
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ba59983..b14ed4b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2504,7 +2504,7 @@
 	if (p->dir > XFRM_POLICY_OUT)
 		return NULL;
 
-	xp = xfrm_policy_alloc(net, GFP_KERNEL);
+	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
 	if (xp == NULL) {
 		*dir = -ENOBUFS;
 		return NULL;
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index 642eef3..178061e 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -44,10 +44,17 @@
 static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
 #endif
 
+static const unsigned char expected_result[FIFO_SIZE] = {
+	 3,  4,  5,  6,  7,  8,  9,  0,
+	 1, 20, 21, 22, 23, 24, 25, 26,
+	27, 28, 29, 30, 31, 32, 33, 34,
+	35, 36, 37, 38, 39, 40, 41, 42,
+};
+
 static int __init testfunc(void)
 {
 	unsigned char	buf[6];
-	unsigned char	i;
+	unsigned char	i, j;
 	unsigned int	ret;
 
 	printk(KERN_INFO "byte stream fifo test start\n");
@@ -73,16 +80,34 @@
 	ret = kfifo_in(&test, buf, ret);
 	printk(KERN_INFO "ret: %d\n", ret);
 
+	/* skip first element of the fifo */
+	printk(KERN_INFO "skip 1st element\n");
+	kfifo_skip(&test);
+
 	/* put values into the fifo until is full */
 	for (i = 20; kfifo_put(&test, &i); i++)
 		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
 
-	/* print out all values in the fifo */
-	while (kfifo_get(&test, &i))
-		printk("%d ", i);
-	printk("\n");
+	/* show the first value without removing from the fifo */
+	if (kfifo_peek(&test, &i))
+		printk(KERN_INFO "%d\n", i);
+
+	/* check the correctness of all values in the fifo */
+	j = 0;
+	while (kfifo_get(&test, &i)) {
+		printk(KERN_INFO "item = %d\n", i);
+		if (i != expected_result[j++]) {
+			printk(KERN_WARNING "value mismatch: test failed\n");
+			return -EIO;
+		}
+	}
+	if (j != ARRAY_SIZE(expected_result)) {
+		printk(KERN_WARNING "size mismatch: test failed\n");
+		return -EIO;
+	}
+	printk(KERN_INFO "test passed\n");
 
 	return 0;
 }
@@ -138,7 +163,12 @@
 #else
 	INIT_KFIFO(test);
 #endif
-	testfunc();
+	if (testfunc() < 0) {
+#ifdef DYNAMIC
+		kfifo_free(&test);
+#endif
+		return -EIO;
+	}
 
 	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index b9482c2..ee03a4f 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -29,8 +29,8 @@
 	printk(KERN_INFO "DMA fifo test start\n");
 
 	if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
-		printk(KERN_ERR "error kfifo_alloc\n");
-		return 1;
+		printk(KERN_WARNING "error kfifo_alloc\n");
+		return -ENOMEM;
 	}
 
 	printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
@@ -41,72 +41,99 @@
 		kfifo_put(&fifo, &i);
 
 	/* kick away first byte */
-	ret = kfifo_get(&fifo, &i);
+	kfifo_skip(&fifo);
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
 
+	/*
+	 * Configure the kfifo buffer to receive data from DMA input.
+	 *
+	 *  .--------------------------------------.
+	 *  | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
+	 *  |---|------------------|---------------|
+	 *   \_/ \________________/ \_____________/
+	 *    \          \                  \
+	 *     \          \_allocated data   \
+	 *      \_*free space*                \_*free space*
+	 *
+	 * We need two different SG entries: one for the free space area at the
+	 * end of the kfifo buffer (19 bytes) and another for the first free
+	 * byte at the beginning, after the kfifo_skip().
+	 */
+	sg_init_table(sg, ARRAY_SIZE(sg));
 	ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
 	printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-
-	/* if 0 was returned, fifo is full and no sgl was created */
-	if (ret) {
-		printk(KERN_INFO "scatterlist for receive:\n");
-		for (i = 0; i < ARRAY_SIZE(sg); i++) {
-			printk(KERN_INFO
-			"sg[%d] -> "
-			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-				i, sg[i].page_link, sg[i].offset, sg[i].length);
-
-			if (sg_is_last(&sg[i]))
-				break;
-		}
-
-		/* but here your code to setup and exectute the dma operation */
-		/* ... */
-
-		/* example: zero bytes received */
-		ret = 0;
-
-		/* finish the dma operation and update the received data */
-		kfifo_dma_in_finish(&fifo, ret);
+	if (!ret) {
+		/* fifo is full and no sgl was created */
+		printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
+		return -EIO;
 	}
 
+	/* receive data */
+	printk(KERN_INFO "scatterlist for receive:\n");
+	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+		printk(KERN_INFO
+		"sg[%d] -> "
+		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+			i, sg[i].page_link, sg[i].offset, sg[i].length);
+
+		if (sg_is_last(&sg[i]))
+			break;
+	}
+
+	/* put here your code to set up and execute the dma operation */
+	/* ... */
+
+	/* example: zero bytes received */
+	ret = 0;
+
+	/* finish the dma operation and update the received data */
+	kfifo_dma_in_finish(&fifo, ret);
+
+	/* Prepare to transmit data, example: 8 bytes */
 	ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
 	printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-
-	/* if 0 was returned, no data was available and no sgl was created */
-	if (ret) {
-		printk(KERN_INFO "scatterlist for transmit:\n");
-		for (i = 0; i < ARRAY_SIZE(sg); i++) {
-			printk(KERN_INFO
-			"sg[%d] -> "
-			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
-				i, sg[i].page_link, sg[i].offset, sg[i].length);
-
-			if (sg_is_last(&sg[i]))
-				break;
-		}
-
-		/* but here your code to setup and exectute the dma operation */
-		/* ... */
-
-		/* example: 5 bytes transmitted */
-		ret = 5;
-
-		/* finish the dma operation and update the transmitted data */
-		kfifo_dma_out_finish(&fifo, ret);
+	if (!ret) {
+		/* no data was available and no sgl was created */
+		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
+		return -EIO;
 	}
 
+	printk(KERN_INFO "scatterlist for transmit:\n");
+	for (i = 0; i < ARRAY_SIZE(sg); i++) {
+		printk(KERN_INFO
+		"sg[%d] -> "
+		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
+			i, sg[i].page_link, sg[i].offset, sg[i].length);
+
+		if (sg_is_last(&sg[i]))
+			break;
+	}
+
+	/* put here your code to set up and execute the dma operation */
+	/* ... */
+
+	/* example: 5 bytes transmitted */
+	ret = 5;
+
+	/* finish the dma operation and update the transmitted data */
+	kfifo_dma_out_finish(&fifo, ret);
+
+	ret = kfifo_len(&fifo);
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
 
+	if (ret != 7) {
+		printk(KERN_WARNING "size mismatch: test failed\n");
+		return -EIO;
+	}
+	printk(KERN_INFO "test passed\n");
+
 	return 0;
 }
 
 static void __exit example_exit(void)
 {
-#ifdef DYNAMIC
-	kfifo_free(&test);
-#endif
+	kfifo_free(&fifo);
 }
 
 module_init(example_init);
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index d6c5b7d..71b2aab 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -44,10 +44,17 @@
 static DEFINE_KFIFO(test, int, FIFO_SIZE);
 #endif
 
+static const int expected_result[FIFO_SIZE] = {
+	 3,  4,  5,  6,  7,  8,  9,  0,
+	 1, 20, 21, 22, 23, 24, 25, 26,
+	27, 28, 29, 30, 31, 32, 33, 34,
+	35, 36, 37, 38, 39, 40, 41, 42,
+};
+
 static int __init testfunc(void)
 {
 	int		buf[6];
-	int		i;
+	int		i, j;
 	unsigned int	ret;
 
 	printk(KERN_INFO "int fifo test start\n");
@@ -66,8 +73,13 @@
 	ret = kfifo_in(&test, buf, ret);
 	printk(KERN_INFO "ret: %d\n", ret);
 
-	for (i = 20; i != 30; i++)
-		kfifo_put(&test, &i);
+	/* skip first element of the fifo */
+	printk(KERN_INFO "skip 1st element\n");
+	kfifo_skip(&test);
+
+	/* put values into the fifo until it is full */
+	for (i = 20; kfifo_put(&test, &i); i++)
+		;
 
 	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
 
@@ -75,10 +87,20 @@
 	if (kfifo_peek(&test, &i))
 		printk(KERN_INFO "%d\n", i);
 
-	/* print out all values in the fifo */
-	while (kfifo_get(&test, &i))
-		printk("%d ", i);
-	printk("\n");
+	/* check the correctness of all values in the fifo */
+	j = 0;
+	while (kfifo_get(&test, &i)) {
+		printk(KERN_INFO "item = %d\n", i);
+		if (i != expected_result[j++]) {
+			printk(KERN_WARNING "value mismatch: test failed\n");
+			return -EIO;
+		}
+	}
+	if (j != ARRAY_SIZE(expected_result)) {
+		printk(KERN_WARNING "size mismatch: test failed\n");
+		return -EIO;
+	}
+	printk(KERN_INFO "test passed\n");
 
 	return 0;
 }
@@ -132,7 +154,12 @@
 		return ret;
 	}
 #endif
-	testfunc();
+	if (testfunc() < 0) {
+#ifdef DYNAMIC
+		kfifo_free(&test);
+#endif
+		return -EIO;
+	}
 
 	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
index 32c6e0b..e68bd16 100644
--- a/samples/kfifo/record-example.c
+++ b/samples/kfifo/record-example.c
@@ -55,6 +55,19 @@
 static mytest test;
 #endif
 
+static const char *expected_result[] = {
+	"a",
+	"bb",
+	"ccc",
+	"dddd",
+	"eeeee",
+	"ffffff",
+	"ggggggg",
+	"hhhhhhhh",
+	"iiiiiiiii",
+	"jjjjjjjjjj",
+};
+
 static int __init testfunc(void)
 {
 	char		buf[100];
@@ -75,6 +88,10 @@
 		kfifo_in(&test, buf, i + 1);
 	}
 
+	/* skip first element of the fifo */
+	printk(KERN_INFO "skip 1st element\n");
+	kfifo_skip(&test);
+
 	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
 
 	/* show the first record without removing from the fifo */
@@ -82,11 +99,22 @@
 	if (ret)
 		printk(KERN_INFO "%.*s\n", ret, buf);
 
-	/* print out all records in the fifo */
+	/* check the correctness of all values in the fifo */
+	i = 0;
 	while (!kfifo_is_empty(&test)) {
 		ret = kfifo_out(&test, buf, sizeof(buf));
-		printk(KERN_INFO "%.*s\n", ret, buf);
+		buf[ret] = '\0';
+		printk(KERN_INFO "item = %.*s\n", ret, buf);
+		if (strcmp(buf, expected_result[i++])) {
+			printk(KERN_WARNING "value mismatch: test failed\n");
+			return -EIO;
+		}
 	}
+	if (i != ARRAY_SIZE(expected_result)) {
+		printk(KERN_WARNING "size mismatch: test failed\n");
+		return -EIO;
+	}
+	printk(KERN_INFO "test passed\n");
 
 	return 0;
 }
@@ -142,7 +170,12 @@
 #else
 	INIT_KFIFO(test);
 #endif
-	testfunc();
+	if (testfunc() < 0) {
+#ifdef DYNAMIC
+		kfifo_free(&test);
+#endif
+		return -EIO;
+	}
 
 	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
 #ifdef DYNAMIC
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c39327e..515253f 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -497,7 +497,9 @@
 			/*
 			 * If symbol is a choice value and equals to the
 			 * default for a choice - skip.
-			 * But only if value is bool and equal to "y" .
+			 * But only if value is bool and equal to "y" and
+			 * choice is not "optional".
+			 * (If choice is "optional" then all values can be "n")
 			 */
 			if (sym_is_choice_value(sym)) {
 				struct symbol *cs;
@@ -505,7 +507,7 @@
 
 				cs = prop_get_symbol(sym_get_choice_prop(sym));
 				ds = sym_choice_default(cs);
-				if (sym == ds) {
+				if (!sym_is_optional(cs) && sym == ds) {
 					if ((sym->type == S_BOOLEAN) &&
 					    sym_get_tristate_value(sym) == yes)
 						goto next_menu;
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index e95718fe..943712c 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -937,6 +937,8 @@
 		sym = stack->sym;
 		next_sym = stack->next ? stack->next->sym : last_sym;
 		prop = stack->prop;
+		if (prop == NULL)
+			prop = stack->sym->prop;
 
 		/* for choice values find the menu entry (used below) */
 		if (sym_is_choice(sym) || sym_is_choice_value(sym)) {
diff --git a/scripts/mkmakefile b/scripts/mkmakefile
index 67d59c7..5325423 100644
--- a/scripts/mkmakefile
+++ b/scripts/mkmakefile
@@ -44,7 +44,9 @@
 
 Makefile:;
 
-\$(all) %/: all
+\$(all): all
 	@:
 
+%/: all
+	@:
 EOF
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 0171060..e67f054 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -159,6 +159,7 @@
 my $function_regex;	# Find the name of a function
 			#    (return offset and func name)
 my $mcount_regex;	# Find the call site to mcount (return offset)
+my $mcount_adjust;	# Address adjustment to mcount offset
 my $alignment;		# The .align value to use for $mcount_section
 my $section_type;	# Section header plus possible alignment command
 my $can_use_local = 0; 	# If we can use local function references
@@ -213,6 +214,7 @@
 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
 $section_type = '@progbits';
+$mcount_adjust = 0;
 $type = ".long";
 
 if ($arch eq "x86_64") {
@@ -351,6 +353,9 @@
 } elsif ($arch eq "microblaze") {
     # Microblaze calls '_mcount' instead of plain 'mcount'.
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+} elsif ($arch eq "blackfin") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
+    $mcount_adjust = -4;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -511,7 +516,7 @@
     }
     # is this a call site to mcount? If so, record it to print later
     if ($text_found && /$mcount_regex/) {
-	push(@offsets, hex $1);
+	push(@offsets, (hex $1) + $mcount_adjust);
     }
 }
 
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index e90a91c..057b6b3 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -43,7 +43,7 @@
 	fi
 
 	# Check for git and a git repo.
-	if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+	if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
@@ -85,7 +85,7 @@
 	fi
 
 	# Check for mercurial and a mercurial repo.
-	if hgid=`hg id 2>/dev/null`; then
+	if test -d .hg && hgid=`hg id 2>/dev/null`; then
 		tag=`printf '%s' "$hgid" | cut -s -d' ' -f2`
 
 		# Do we have an untagged version?
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index d5666d3..f73e2c2 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -607,8 +607,8 @@
 	return error;
 }
 
-static int apparmor_task_setrlimit(unsigned int resource,
-				   struct rlimit *new_rlim)
+static int apparmor_task_setrlimit(struct task_struct *task,
+		unsigned int resource, struct rlimit *new_rlim)
 {
 	struct aa_profile *profile = aa_current_profile();
 	int error = 0;
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 96bab94..19358dc 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -62,19 +62,14 @@
 	int deleted, connected;
 	int error = 0;
 
-	/* Get the root we want to resolve too */
+	/* Get the root we want to resolve too, released below */
 	if (flags & PATH_CHROOT_REL) {
 		/* resolve paths relative to chroot */
-		read_lock(&current->fs->lock);
-		root = current->fs->root;
-		/* released below */
-		path_get(&root);
-		read_unlock(&current->fs->lock);
+		get_fs_root(current->fs, &root);
 	} else {
 		/* resolve paths relative to namespace */
 		root.mnt = current->nsproxy->mnt_ns->root;
 		root.dentry = root.mnt->mnt_root;
-		/* released below */
 		path_get(&root);
 	}
 
diff --git a/security/commoncap.c b/security/commoncap.c
index 4e01599..9d172e6 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -40,7 +40,7 @@
  *
  * Warn if that happens, once per boot.
  */
-static void warn_setuid_and_fcaps_mixed(char *fname)
+static void warn_setuid_and_fcaps_mixed(const char *fname)
 {
 	static int warned;
 	if (!warned) {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 42043f9..4796ddd 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2170,8 +2170,9 @@
 
 	tty = get_current_tty();
 	if (tty) {
-		file_list_lock();
+		spin_lock(&tty_files_lock);
 		if (!list_empty(&tty->tty_files)) {
+			struct tty_file_private *file_priv;
 			struct inode *inode;
 
 			/* Revalidate access to controlling tty.
@@ -2179,14 +2180,16 @@
 			   than using file_has_perm, as this particular open
 			   file may belong to another process and we are only
 			   interested in the inode-based check here. */
-			file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list);
+			file_priv = list_first_entry(&tty->tty_files,
+						struct tty_file_private, list);
+			file = file_priv->file;
 			inode = file->f_path.dentry->d_inode;
 			if (inode_has_perm(cred, inode,
 					   FILE__READ | FILE__WRITE, NULL)) {
 				drop_tty = 1;
 			}
 		}
-		file_list_unlock();
+		spin_unlock(&tty_files_lock);
 		tty_kref_put(tty);
 	}
 	/* Reset controlling tty. */
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a3b2a64..134fc6c 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -978,6 +978,10 @@
 {
 	if (substream->runtime->trigger_master != substream)
 		return 0;
+	/* some drivers might use hw_ptr to recover from the pause -
+	   update the hw_ptr now */
+	if (push)
+		snd_pcm_update_hw_ptr(substream);
 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
 	 * a delta betwen the current jiffies, this gives a large enough
 	 * delta, effectively to skip the check once.
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 4203782..aff8387 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -52,6 +52,7 @@
 static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
 static int enable_ir[SNDRV_CARDS];
 static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
+static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -73,6 +74,8 @@
 MODULE_PARM_DESC(enable_ir, "Enable IR.");
 module_param_array(subsystem, uint, NULL, 0444);
 MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
+module_param_array(delay_pcm_irq, uint, NULL, 0444);
+MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 2).");
 /*
  * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value  Model:SB0400
  */
@@ -127,6 +130,7 @@
 				      &emu)) < 0)
 		goto error;
 	card->private_data = emu;
+	emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
 	if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
 		goto error;
 	if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 55b83ef..622bace 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -332,7 +332,7 @@
 		evoice->epcm->ccca_start_addr = start_addr + ccis;
 		if (extra) {
 			start_addr += ccis;
-			end_addr += ccis;
+			end_addr += ccis + emu->delay_pcm_irq;
 		}
 		if (stereo && !extra) {
 			snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,7 +360,9 @@
 	/* Assumption that PT is already 0 so no harm overwriting */
 	snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
 	snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
-	snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
+	snd_emu10k1_ptr_write(emu, PSST, voice,
+			(start_addr + (extra ? emu->delay_pcm_irq : 0)) |
+			(send_amount[2] << 24));
 	if (emu->card_capabilities->emu_model)
 		pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
 	else 
@@ -732,6 +734,23 @@
 	snd_emu10k1_ptr_write(emu, IP, voice, 0);
 }
 
+static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
+		struct snd_emu10k1_pcm *epcm,
+		struct snd_pcm_substream *substream,
+		struct snd_pcm_runtime *runtime)
+{
+	unsigned int ptr, period_pos;
+
+	/* try to synchronize the current position for the interrupt
+	   source voice */
+	period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
+	period_pos %= runtime->period_size;
+	ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
+	ptr &= ~0x00ffffff;
+	ptr |= epcm->ccca_start_addr + period_pos;
+	snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
+}
+
 static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
 				        int cmd)
 {
@@ -753,6 +772,8 @@
 		/* follow thru */
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 	case SNDRV_PCM_TRIGGER_RESUME:
+		if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+			snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
 		mix = &emu->pcm_mixer[substream->number];
 		snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
 		snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -869,8 +890,9 @@
 #endif
 	/*
 	printk(KERN_DEBUG
-	       "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
-	       ptr, runtime->buffer_size, runtime->period_size);
+	       "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
+	       (long)ptr, (long)runtime->buffer_size,
+	       (long)runtime->period_size);
 	*/
 	return ptr;
 }
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index ffb1ddb..957a311 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -310,8 +310,10 @@
 	if (snd_BUG_ON(!hdr))
 		return NULL;
 
+	idx = runtime->period_size >= runtime->buffer_size ?
+					(emu->delay_pcm_irq * 2) : 0;
 	mutex_lock(&hdr->block_mutex);
-	blk = search_empty(emu, runtime->dma_bytes);
+	blk = search_empty(emu, runtime->dma_bytes + idx);
 	if (blk == NULL) {
 		mutex_unlock(&hdr->block_mutex);
 		return NULL;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index dd8fb86..3827092 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -589,6 +589,7 @@
 	bus->ops = temp->ops;
 
 	mutex_init(&bus->cmd_mutex);
+	mutex_init(&bus->prepare_mutex);
 	INIT_LIST_HEAD(&bus->codec_list);
 
 	snprintf(bus->workq_name, sizeof(bus->workq_name),
@@ -1068,7 +1069,6 @@
 	codec->addr = codec_addr;
 	mutex_init(&codec->spdif_mutex);
 	mutex_init(&codec->control_mutex);
-	mutex_init(&codec->prepare_mutex);
 	init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
 	init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
 	snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
@@ -1213,6 +1213,7 @@
 				u32 stream_tag,
 				int channel_id, int format)
 {
+	struct hda_codec *c;
 	struct hda_cvt_setup *p;
 	unsigned int oldval, newval;
 	int i;
@@ -1253,10 +1254,12 @@
 	p->dirty = 0;
 
 	/* make other inactive cvts with the same stream-tag dirty */
-	for (i = 0; i < codec->cvt_setups.used; i++) {
-		p = snd_array_elem(&codec->cvt_setups, i);
-		if (!p->active && p->stream_tag == stream_tag)
-			p->dirty = 1;
+	list_for_each_entry(c, &codec->bus->codec_list, list) {
+		for (i = 0; i < c->cvt_setups.used; i++) {
+			p = snd_array_elem(&c->cvt_setups, i);
+			if (!p->active && p->stream_tag == stream_tag)
+				p->dirty = 1;
+		}
 	}
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream);
@@ -1306,12 +1309,16 @@
 /* clean up the all conflicting obsolete streams */
 static void purify_inactive_streams(struct hda_codec *codec)
 {
+	struct hda_codec *c;
 	int i;
 
-	for (i = 0; i < codec->cvt_setups.used; i++) {
-		struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i);
-		if (p->dirty)
-			really_cleanup_stream(codec, p);
+	list_for_each_entry(c, &codec->bus->codec_list, list) {
+		for (i = 0; i < c->cvt_setups.used; i++) {
+			struct hda_cvt_setup *p;
+			p = snd_array_elem(&c->cvt_setups, i);
+			if (p->dirty)
+				really_cleanup_stream(c, p);
+		}
 	}
 }
 
@@ -3502,11 +3509,11 @@
 			  struct snd_pcm_substream *substream)
 {
 	int ret;
-	mutex_lock(&codec->prepare_mutex);
+	mutex_lock(&codec->bus->prepare_mutex);
 	ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream);
 	if (ret >= 0)
 		purify_inactive_streams(codec);
-	mutex_unlock(&codec->prepare_mutex);
+	mutex_unlock(&codec->bus->prepare_mutex);
 	return ret;
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_prepare);
@@ -3515,9 +3522,9 @@
 			   struct hda_pcm_stream *hinfo,
 			   struct snd_pcm_substream *substream)
 {
-	mutex_lock(&codec->prepare_mutex);
+	mutex_lock(&codec->bus->prepare_mutex);
 	hinfo->ops.cleanup(hinfo, codec, substream);
-	mutex_unlock(&codec->prepare_mutex);
+	mutex_unlock(&codec->bus->prepare_mutex);
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup);
 
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 4303353..62c7022 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -648,6 +648,7 @@
 	struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
 
 	struct mutex cmd_mutex;
+	struct mutex prepare_mutex;
 
 	/* unsolicited event queue */
 	struct hda_bus_unsolicited *unsol;
@@ -826,7 +827,6 @@
 
 	struct mutex spdif_mutex;
 	struct mutex control_mutex;
-	struct mutex prepare_mutex;
 	unsigned int spdif_status;	/* IEC958 status bits */
 	unsigned short spdif_ctls;	/* SPDIF control bits */
 	unsigned int spdif_in_enable;	/* SPDIF input enable? */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 803b298..26c3ade 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -596,6 +596,8 @@
 }
 EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free);
 
+#endif /* CONFIG_PROC_FS */
+
 /* update PCM info based on ELD */
 void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
 			      struct hda_pcm_stream *codec_pars)
@@ -644,5 +646,3 @@
 	pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
 }
 EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info);
-
-#endif /* CONFIG_PROC_FS */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 31b5d9e..5cdb80e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3049,6 +3049,7 @@
 	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
 		      CXT5066_DELL_LAPTOP),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
+	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
@@ -3058,6 +3059,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
  	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
  	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
 	{}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2bc0f07..afd6022 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -707,8 +707,6 @@
 			      u32 stream_tag, int format)
 {
 	struct hdmi_spec *spec = codec->spec;
-	int tag;
-	int fmt;
 	int pinctl;
 	int new_pinctl = 0;
 	int i;
@@ -745,24 +743,7 @@
 		return -EINVAL;
 	}
 
-	tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4;
-	fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0);
-
-	snd_printdd("hdmi_setup_stream: "
-		    "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n",
-		    nid,
-		    tag == stream_tag ? "" : "new-",
-		    stream_tag,
-		    fmt == format ? "" : "new-",
-		    format);
-
-	if (tag != stream_tag)
-		snd_hda_codec_write(codec, nid, 0,
-				    AC_VERB_SET_CHANNEL_STREAMID,
-				    stream_tag << 4);
-	if (fmt != format)
-		snd_hda_codec_write(codec, nid, 0,
-				    AC_VERB_SET_STREAM_FORMAT, format);
+	snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
 	return 0;
 }
 
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
index d382d3c..36a9b83 100644
--- a/sound/pci/hda/patch_intelhdmi.c
+++ b/sound/pci/hda/patch_intelhdmi.c
@@ -69,20 +69,12 @@
 	return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format);
 }
 
-static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-					   struct hda_codec *codec,
-					   struct snd_pcm_substream *substream)
-{
-	return 0;
-}
-
 static struct hda_pcm_stream intel_hdmi_pcm_playback = {
 	.substreams = 1,
 	.channels_min = 2,
 	.ops = {
 		.open = hdmi_pcm_open,
 		.prepare = intel_hdmi_playback_pcm_prepare,
-		.cleanup = intel_hdmi_playback_pcm_cleanup,
 	},
 };
 
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index f636870..69b950d 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -326,13 +326,6 @@
 	return 0;
 }
 
-static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-					   struct hda_codec *codec,
-					   struct snd_pcm_substream *substream)
-{
-	return 0;
-}
-
 static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo,
 					struct hda_codec *codec,
 					unsigned int stream_tag,
@@ -350,7 +343,6 @@
 	.ops = {
 		.open = hdmi_pcm_open,
 		.prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89,
-		.cleanup = nvhdmi_playback_pcm_cleanup,
 	},
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2cd1ae8..a4dd045 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -19030,6 +19030,7 @@
 /*
  * ALC680 support
  */
+#define ALC680_DIGIN_NID	ALC880_DIGIN_NID
 #define ALC680_DIGOUT_NID	ALC880_DIGOUT_NID
 #define alc680_modes		alc260_modes
 
@@ -19044,23 +19045,93 @@
 	0x07, 0x08, 0x09
 };
 
+/*
+ * Analog capture ADC change
+ */
+static int alc680_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+				      struct hda_codec *codec,
+				      unsigned int stream_tag,
+				      unsigned int format,
+				      struct snd_pcm_substream *substream)
+{
+	struct alc_spec *spec = codec->spec;
+	struct auto_pin_cfg *cfg = &spec->autocfg;
+	unsigned int pre_mic, pre_line;
+
+	pre_mic  = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+	pre_line = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_LINE]);
+
+	spec->cur_adc_stream_tag = stream_tag;
+	spec->cur_adc_format = format;
+
+	if (pre_mic || pre_line) {
+		if (pre_mic)
+			snd_hda_codec_setup_stream(codec, 0x08, stream_tag, 0,
+									format);
+		else
+			snd_hda_codec_setup_stream(codec, 0x09, stream_tag, 0,
+									format);
+	} else
+		snd_hda_codec_setup_stream(codec, 0x07, stream_tag, 0, format);
+	return 0;
+}
+
+static int alc680_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+				      struct hda_codec *codec,
+				      struct snd_pcm_substream *substream)
+{
+	snd_hda_codec_cleanup_stream(codec, 0x07);
+	snd_hda_codec_cleanup_stream(codec, 0x08);
+	snd_hda_codec_cleanup_stream(codec, 0x09);
+	return 0;
+}
+
+static struct hda_pcm_stream alc680_pcm_analog_auto_capture = {
+	.substreams = 1, /* can be overridden */
+	.channels_min = 2,
+	.channels_max = 2,
+	/* NID is set in alc_build_pcms */
+	.ops = {
+		.prepare = alc680_capture_pcm_prepare,
+		.cleanup = alc680_capture_pcm_cleanup
+	},
+};
+
 static struct snd_kcontrol_new alc680_base_mixer[] = {
 	/* output mixer control */
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
-static struct snd_kcontrol_new alc680_capture_mixer[] = {
-	HDA_CODEC_VOLUME("Capture Volume", 0x07, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Capture Switch", 0x07, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x09, 0x0, HDA_INPUT),
+static struct hda_bind_ctls alc680_bind_cap_vol = {
+	.ops = &snd_hda_bind_vol,
+	.values = {
+		HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+		0
+	},
+};
+
+static struct hda_bind_ctls alc680_bind_cap_switch = {
+	.ops = &snd_hda_bind_sw,
+	.values = {
+		HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT),
+		HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+		0
+	},
+};
+
+static struct snd_kcontrol_new alc680_master_capture_mixer[] = {
+	HDA_BIND_VOL("Capture Volume", &alc680_bind_cap_vol),
+	HDA_BIND_SW("Capture Switch", &alc680_bind_cap_switch),
 	{ } /* end */
 };
 
@@ -19068,25 +19139,73 @@
  * generic initialization of ADC, input mixers and output mixers
  */
 static struct hda_verb alc680_init_verbs[] = {
-	/* Unmute DAC0-1 and set vol = 0 */
-	{0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 
-	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40},
-	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
-	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
-	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
+	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
 
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
 	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
 	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+
+	{0x16, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT   | AC_USRSP_EN},
+	{0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT  | AC_USRSP_EN},
+
 	{ }
 };
 
+/* toggle speaker-output according to the hp-jack state */
+static void alc680_base_setup(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	spec->autocfg.hp_pins[0] = 0x16;
+	spec->autocfg.speaker_pins[0] = 0x14;
+	spec->autocfg.speaker_pins[1] = 0x15;
+	spec->autocfg.input_pins[AUTO_PIN_MIC] = 0x18;
+	spec->autocfg.input_pins[AUTO_PIN_LINE] = 0x19;
+}
+
+static void alc680_rec_autoswitch(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	struct auto_pin_cfg *cfg = &spec->autocfg;
+	unsigned int present;
+	hda_nid_t new_adc;
+
+	present = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]);
+
+	new_adc = present ? 0x8 : 0x7;
+	__snd_hda_codec_cleanup_stream(codec, !present ? 0x8 : 0x7, 1);
+	snd_hda_codec_setup_stream(codec, new_adc,
+				   spec->cur_adc_stream_tag, 0,
+				   spec->cur_adc_format);
+
+}
+
+static void alc680_unsol_event(struct hda_codec *codec,
+					   unsigned int res)
+{
+	if ((res >> 26) == ALC880_HP_EVENT)
+		alc_automute_amp(codec);
+	if ((res >> 26) == ALC880_MIC_EVENT)
+		alc680_rec_autoswitch(codec);
+}
+
+static void alc680_inithook(struct hda_codec *codec)
+{
+	alc_automute_amp(codec);
+	alc680_rec_autoswitch(codec);
+}
+
 /* create input playback/capture controls for the given pin */
 static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
 				    const char *ctlname, int idx)
@@ -19197,13 +19316,7 @@
 #define alc680_pcm_analog_capture	alc880_pcm_analog_capture
 #define alc680_pcm_analog_alt_capture	alc880_pcm_analog_alt_capture
 #define alc680_pcm_digital_playback	alc880_pcm_digital_playback
-
-static struct hda_input_mux alc680_capture_source = {
-	.num_items = 1,
-	.items = {
-		{ "Mic", 0x0 },
-	},
-};
+#define alc680_pcm_digital_capture	alc880_pcm_digital_capture
 
 /*
  * BIOS auto configuration
@@ -19218,6 +19331,7 @@
 					   alc680_ignore);
 	if (err < 0)
 		return err;
+
 	if (!spec->autocfg.line_outs) {
 		if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) {
 			spec->multiout.max_channels = 2;
@@ -19239,8 +19353,6 @@
 		add_mixer(spec, spec->kctls.list);
 
 	add_verb(spec, alc680_init_verbs);
-	spec->num_mux_defs = 1;
-	spec->input_mux = &alc680_capture_source;
 
 	err = alc_auto_add_mic_boost(codec);
 	if (err < 0)
@@ -19279,17 +19391,17 @@
 static struct alc_config_preset alc680_presets[] = {
 	[ALC680_BASE] = {
 		.mixers = { alc680_base_mixer },
-		.cap_mixer =  alc680_capture_mixer,
+		.cap_mixer =  alc680_master_capture_mixer,
 		.init_verbs = { alc680_init_verbs },
 		.num_dacs = ARRAY_SIZE(alc680_dac_nids),
 		.dac_nids = alc680_dac_nids,
-		.num_adc_nids = ARRAY_SIZE(alc680_adc_nids),
-		.adc_nids = alc680_adc_nids,
-		.hp_nid = 0x04,
 		.dig_out_nid = ALC680_DIGOUT_NID,
 		.num_channel_mode = ARRAY_SIZE(alc680_modes),
 		.channel_mode = alc680_modes,
-		.input_mux = &alc680_capture_source,
+		.unsol_event = alc680_unsol_event,
+		.setup = alc680_base_setup,
+		.init_hook = alc680_inithook,
+
 	},
 };
 
@@ -19333,9 +19445,9 @@
 		setup_preset(codec, &alc680_presets[board_config]);
 
 	spec->stream_analog_playback = &alc680_pcm_analog_playback;
-	spec->stream_analog_capture = &alc680_pcm_analog_capture;
-	spec->stream_analog_alt_capture = &alc680_pcm_analog_alt_capture;
+	spec->stream_analog_capture = &alc680_pcm_analog_auto_capture;
 	spec->stream_digital_playback = &alc680_pcm_digital_playback;
+	spec->stream_digital_capture = &alc680_pcm_digital_capture;
 
 	if (!spec->adc_nids) {
 		spec->adc_nids = alc680_adc_nids;
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f3f861b..95148e5 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -6303,6 +6303,21 @@
 	{ .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx },
 	{ .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
 	{ .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
+	{ .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
+	{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
 	{} /* terminator */
 };
 
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6433e65..4677492 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1776,6 +1776,12 @@
         },
 	{
 		.subvendor = 0x1014,
+		.subdevice = 0x0534,
+		.name = "ThinkPad X31",
+		.type = AC97_TUNE_INV_EAPD
+	},
+	{
+		.subvendor = 0x1014,
 		.subdevice = 0x1f00,
 		.name = "MS-9128",
 		.type = AC97_TUNE_ALC_JACK
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index f64fb7d..ad5202e 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1224,15 +1224,14 @@
 		    firmware.firmware.ASIC, firmware.firmware.CODEC,
 		    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
+	if (!chip)
+		return 1;
+
 	for (i = 0; i < FIRMWARE_VERSIONS; i++) {
 		if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-			break;
-	}
-	if (i >= FIRMWARE_VERSIONS)
-		return 0; /* no match */
+			return 1; /* OK */
 
-	if (!chip)
-		return 1; /* OK */
+	}
 
 	snd_printdd("Writing Firmware\n");
 	if (!chip->fw_entry) {
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 4e212ed..f8154e6 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -178,13 +178,6 @@
 	case SND_SOC_DAIFMT_LEFT_J:
 		iface |= 0x0001;
 		break;
-		/* FIXME: CHECK A/B */
-	case SND_SOC_DAIFMT_DSP_A:
-		iface |= 0x0003;
-		break;
-	case SND_SOC_DAIFMT_DSP_B:
-		iface |= 0x0007;
-		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index a11daa1..c81da05 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -254,6 +254,9 @@
 		dma_data = &ssi->dma_params_rx;
 	}
 
+	if (ssi->flags & IMX_SSI_SYN)
+		reg = SSI_STCCR;
+
 	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
 
 	sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 41abb90..4f1fa77 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -5,6 +5,12 @@
 # The default target of this Makefile is...
 all::
 
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
 # Define V=1 to have a more verbose compile.
 # Define V=2 to have an even more verbose compile.
 #
@@ -157,10 +163,6 @@
 #
 # Define NO_DWARF if you do not want debug-info analysis feature at all.
 
-$(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null)
-$(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null)
-$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
-
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
 	@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
 -include $(OUTPUT)PERF-VERSION-FILE
@@ -186,8 +188,6 @@
         ARCH := x86
 endif
 
-$(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null)
-
 # CFLAGS and LDFLAGS are for the users to override from the command line.
 
 #
@@ -268,6 +268,7 @@
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 RM = rm -f
+MKDIR = mkdir
 TAR = tar
 FIND = find
 INSTALL = install
@@ -838,6 +839,7 @@
 	QUIET_CC       = @echo '   ' CC $@;
 	QUIET_AR       = @echo '   ' AR $@;
 	QUIET_LINK     = @echo '   ' LINK $@;
+	QUIET_MKDIR    = @echo '   ' MKDIR $@;
 	QUIET_BUILT_IN = @echo '   ' BUILTIN $@;
 	QUIET_GEN      = @echo '   ' GEN $@;
 	QUIET_SUBDIR0  = +@subdir=
@@ -935,15 +937,15 @@
 	$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
 $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
-	$(QUIET_GEN)$(RM) $@ $@+ && \
+	$(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
 	sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
 	    -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
 	    -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
 	    -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
 	    -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
-	    $@.sh >$@+ && \
-	chmod +x $@+ && \
-	mv $@+ $(OUTPUT)$@
+	    $@.sh > $(OUTPUT)$@+ && \
+	chmod +x $(OUTPUT)$@+ && \
+	mv $(OUTPUT)$@+ $(OUTPUT)$@
 
 configure: configure.ac
 	$(QUIET_GEN)$(RM) $@ $<+ && \
@@ -1012,6 +1014,14 @@
 $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 builtin-revert.o wt-status.o: wt-status.h
 
+# We compile into subdirectories. If the target directory is not the source directory, they might not exist,
+# so we make the various files depend on their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+	$(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+
 $(LIB_FILE): $(LIB_OBJS)
 	$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
 
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak
index ddb68e6..7a7b608 100644
--- a/tools/perf/feature-tests.mak
+++ b/tools/perf/feature-tests.mak
@@ -113,7 +113,7 @@
 # try-cc
 # Usage: option = $(call try-cc, source-to-build, cc-options)
 try-cc = $(shell sh -c						  \
-	'TMP="$(TMPOUT).$$$$";			 		  \
+	'TMP="$(OUTPUT)$(TMPOUT).$$$$";				  \
 	 echo "$(1)" |						  \
 	 $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
 	 rm -f "$$TMP"')
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 55ff792..a90273e 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -146,6 +146,7 @@
 		return -1;
 
 	newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+	newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
 
 	nd = self->curr_hot;
 	if (nd) {
@@ -178,7 +179,7 @@
 	}
 out:
 	ui_browser__hide(&self->b);
-	return 0;
+	return es->u.key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)