Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace

This is an initial merge in of Eric Biederman's work to start adding
user namespace support to the networking.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index 814b013..b31e782 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -5,4 +5,15 @@
 Description:
 		Control the power of camera module. 1 means on, 0 means off.
 
+What:		/sys/devices/platform/ideapad/fan_mode
+Date:		June 2012
+KernelVersion:	3.6
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Change the fan mode.
+		There are four available modes:
+			* 0 -> Super Silent Mode
+			* 1 -> Standard Mode
+			* 2 -> Dust Cleaning
+			* 4 -> Efficient Thermal Dissipation Mode
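A user-space sketch of exercising the new attribute (illustrative only; the sysfs path is the one documented above, and the attribute is assumed to accept a bare ASCII digit):

	/* Set the fan to Standard Mode (1); 0, 2 and 4 select the other modes. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/platform/ideapad/fan_mode", "w");

		if (!f) {
			perror("fan_mode");
			return 1;
		}
		fprintf(f, "1\n");
		fclose(f);
		return 0;
	}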
 
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 3fca32c..25b58ef 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -224,8 +224,8 @@
 </para>
 
 <para>
-Then at umount time , in your put_super() (2.4) or write_super() (2.5)
-you can then call journal_destroy() to clean up your in-core journal object.
+Then at umount time, in your put_super() you can call journal_destroy()
+to clean up your in-core journal object.
 </para>
 
 <para>
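A minimal sketch of what the updated paragraph describes, assuming a hypothetical myfs_sb_info that carries the journal pointer (only journal_destroy() itself is the real jbd API):

	#include <linux/fs.h>
	#include <linux/jbd.h>
	#include <linux/slab.h>

	struct myfs_sb_info {			/* hypothetical per-superblock data */
		journal_t *s_journal;
	};

	static void myfs_put_super(struct super_block *sb)
	{
		struct myfs_sb_info *sbi = sb->s_fs_info;

		if (sbi->s_journal)
			journal_destroy(sbi->s_journal);	/* tear down the in-core journal */
		kfree(sbi);
		sb->s_fs_info = NULL;
	}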
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
index 7203951..701138f 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
@@ -125,7 +125,7 @@
 <constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para>
 <para>If multiple frequency bands are supported, then
 <structfield>capability</structfield> is the union of all
-<structfield>capability></structfield> fields of each &v4l2-frequency-band;.
+<structfield>capability</structfield> fields of each &v4l2-frequency-band;.
 </para></entry>
 	  </row>
 	  <row>
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
new file mode 100644
index 0000000..dcaabe9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -0,0 +1,109 @@
+TI SoC Ethernet Switch Controller Device Tree Bindings
+------------------------------------------------------
+
+Required properties:
+- compatible		: Should be "ti,cpsw"
+- reg			: physical base address and size of the cpsw
+			  registers map
+- interrupts		: property with a value describing the interrupt
+			  number
+- interrupt-parent	: The parent interrupt controller
+- cpdma_channels 	: Specifies number of channels in CPDMA
+- host_port_no		: Specifies host port shift
+- cpdma_reg_ofs		: Specifies CPDMA submodule register offset
+- cpdma_sram_ofs	: Specifies CPDMA SRAM offset
+- ale_reg_ofs		: Specifies ALE submodule register offset
+- ale_entries		: Specifies number of entries ALE can hold
+- host_port_reg_ofs	: Specifies host port register offset
+- hw_stats_reg_ofs	: Specifies hardware statistics register offset
+- bd_ram_ofs		: Specifies internal descriptor RAM offset
+- bd_ram_size		: Specifies internal descriptor RAM size
+- rx_descs		: Specifies number of Rx descriptors
+- mac_control		: Specifies Default MAC control register content
+			  for the specific platform
+- slaves		: Specifies number of slaves
+- slave_reg_ofs		: Specifies slave register offset
+- sliver_reg_ofs	: Specifies slave sliver register offset
+- phy_id		: Specifies slave phy id
+- mac-address		: Specifies slave MAC address
+
+Optional properties:
+- ti,hwmods		: Must be "cpgmac0"
+- no_bd_ram		: Must be 0 or 1
+
+Note: The "ti,hwmods" field is used to fetch the base address and irq
+resources from the TI omap hwmod database during device registration.
+The plan is to eventually migrate the hwmod database contents into the
+device tree blob so that all the required data is taken from the
+device tree dts file.
+
+Examples:
+
+	mac: ethernet@4A100000 {
+		compatible = "ti,cpsw";
+		reg = <0x4A100000 0x1000>;
+		interrupts = <55 0x4>;
+		interrupt-parent = <&intc>;
+		cpdma_channels = <8>;
+		host_port_no = <0>;
+		cpdma_reg_ofs = <0x800>;
+		cpdma_sram_ofs = <0xa00>;
+		ale_reg_ofs = <0xd00>;
+		ale_entries = <1024>;
+		host_port_reg_ofs = <0x108>;
+		hw_stats_reg_ofs = <0x900>;
+		bd_ram_ofs = <0x2000>;
+		bd_ram_size = <0x2000>;
+		no_bd_ram = <0>;
+		rx_descs = <64>;
+		mac_control = <0x20>;
+		slaves = <2>;
+		cpsw_emac0: slave@0 {
+			slave_reg_ofs = <0x208>;
+			sliver_reg_ofs = <0xd80>;
+			phy_id = "davinci_mdio.16:00";
+			/* Filled in by U-Boot */
+			mac-address = [ 00 00 00 00 00 00 ];
+		};
+		cpsw_emac1: slave@1 {
+			slave_reg_ofs = <0x308>;
+			sliver_reg_ofs = <0xdc0>;
+			phy_id = "davinci_mdio.16:01";
+			/* Filled in by U-Boot */
+			mac-address = [ 00 00 00 00 00 00 ];
+		};
+	};
+
+(or)
+	mac: ethernet@4A100000 {
+		compatible = "ti,cpsw";
+		ti,hwmods = "cpgmac0";
+		cpdma_channels = <8>;
+		host_port_no = <0>;
+		cpdma_reg_ofs = <0x800>;
+		cpdma_sram_ofs = <0xa00>;
+		ale_reg_ofs = <0xd00>;
+		ale_entries = <1024>;
+		host_port_reg_ofs = <0x108>;
+		hw_stats_reg_ofs = <0x900>;
+		bd_ram_ofs = <0x2000>;
+		bd_ram_size = <0x2000>;
+		no_bd_ram = <0>;
+		rx_descs = <64>;
+		mac_control = <0x20>;
+		slaves = <2>;
+		cpsw_emac0: slave@0 {
+			slave_reg_ofs = <0x208>;
+			sliver_reg_ofs = <0xd80>;
+			phy_id = "davinci_mdio.16:00";
+			/* Filled in by U-Boot */
+			mac-address = [ 00 00 00 00 00 00 ];
+		};
+		cpsw_emac1: slave@1 {
+			slave_reg_ofs = <0x308>;
+			sliver_reg_ofs = <0xdc0>;
+			phy_id = "davinci_mdio.16:01";
+			/* Filled in by U-Boot */
+			mac-address = [ 00 00 00 00 00 00 ];
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
new file mode 100644
index 0000000..72efaaf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/davinci-mdio.txt
@@ -0,0 +1,33 @@
+TI SoC Davinci MDIO Controller Device Tree Bindings
+---------------------------------------------------
+
+Required properties:
+- compatible		: Should be "ti,davinci_mdio"
+- reg			: physical base address and size of the davinci mdio
+			  registers map
+- bus_freq		: MDIO bus frequency
+
+Optional properties:
+- ti,hwmods		: Must be "davinci_mdio"
+
+Note: The "ti,hwmods" field is used to fetch the base address and irq
+resources from the TI omap hwmod database during device registration.
+The plan is to eventually migrate the hwmod database contents into the
+device tree blob so that all the required data is taken from the
+device tree dts file.
+
+Examples:
+
+	mdio: davinci_mdio@4A101000 {
+		compatible = "ti,davinci_mdio";
+		reg = <0x4A101000 0x1000>;
+		bus_freq = <1000000>;
+	};
+
+(or)
+
+	mdio: davinci_mdio@4A101000 {
+		compatible = "ti,davinci_mdio";
+		ti,hwmods = "davinci_mdio";
+		bus_freq = <1000000>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt
index d156e1b..da80c2a 100644
--- a/Documentation/devicetree/bindings/regulator/tps6586x.txt
+++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt
@@ -9,9 +9,9 @@
 - regulators: list of regulators provided by this controller, must have
   property "regulator-compatible" to match their hardware counterparts:
   sm[0-2], ldo[0-9] and ldo_rtc
-- sm0-supply: The input supply for the SM0.
-- sm1-supply: The input supply for the SM1.
-- sm2-supply: The input supply for the SM2.
+- vin-sm0-supply: The input supply for the SM0.
+- vin-sm1-supply: The input supply for the SM1.
+- vin-sm2-supply: The input supply for the SM2.
 - vinldo01-supply: The input supply for the LDO1 and LDO2
 - vinldo23-supply: The input supply for the LDO2 and LDO3
 - vinldo4-supply: The input supply for the LDO4
@@ -30,9 +30,9 @@
 		#gpio-cells = <2>;
 		gpio-controller;
 
-		sm0-supply = <&some_reg>;
-		sm1-supply = <&some_reg>;
-		sm2-supply = <&some_reg>;
+		vin-sm0-supply = <&some_reg>;
+		vin-sm1-supply = <&some_reg>;
+		vin-sm2-supply = <&some_reg>;
 		vinldo01-supply = <...>;
 		vinldo23-supply = <...>;
 		vinldo4-supply = <...>;
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 0f103e3..e540a24 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -114,7 +114,6 @@
 	int (*drop_inode) (struct inode *);
 	void (*evict_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
-	void (*write_super) (struct super_block *);
 	int (*sync_fs)(struct super_block *sb, int wait);
 	int (*freeze_fs) (struct super_block *);
 	int (*unfreeze_fs) (struct super_block *);
@@ -136,7 +135,6 @@
 drop_inode:				!!!inode->i_lock!!!
 evict_inode:
 put_super:		write
-write_super:		read
 sync_fs:		read
 freeze_fs:		write
 unfreeze_fs:		write
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2bef2b3..0742fee 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -94,9 +94,8 @@
 ---
 [mandatory]
 
-BKL is also moved from around sb operations.  ->write_super() Is now called 
-without BKL held.  BKL should have been shifted into individual fs sb_op
-functions.  If you don't need it, remove it.  
+BKL is also moved from around sb operations. BKL should have been shifted into
+individual fs sb_op functions.  If you don't need it, remove it.
 
 ---
 [informational]
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index ead764b..de1e6c4 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -137,6 +137,17 @@
 		 without doing anything or remount the partition in
 		 read-only mode (default behavior).
 
+discard       -- If set, issues discard/TRIM commands to the block
+		 device when blocks are freed. This is useful for SSD devices
+		 and sparse/thinly-provisioned LUNs.
+
+nfs           -- This option maintains an index (cache) of directory
+		 inodes by i_logstart, which is used by the nfs-related code
+		 to improve lookups.
+
+		 Enable this only if you want to export the FAT filesystem
+		 over NFS.
+
 <bool>: 0,1,yes,no,true,false
 
 TODO
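For illustration, both new options are ordinary mount data; a hedged user-space sketch (device and mount point are placeholders):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* "discard" and "nfs" are the options documented above. */
		if (mount("/dev/sdb1", "/mnt/flash", "vfat", 0, "discard,nfs") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}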
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 065aa2d..2ee133e 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -216,7 +216,6 @@
         void (*drop_inode) (struct inode *);
         void (*delete_inode) (struct inode *);
         void (*put_super) (struct super_block *);
-        void (*write_super) (struct super_block *);
         int (*sync_fs)(struct super_block *sb, int wait);
         int (*freeze_fs) (struct super_block *);
         int (*unfreeze_fs) (struct super_block *);
@@ -273,9 +272,6 @@
   put_super: called when the VFS wishes to free the superblock
 	(i.e. unmount). This is called with the superblock lock held
 
-  write_super: called when the VFS superblock needs to be written to
-	disc. This method is optional
-
   sync_fs: called when VFS is writing out all dirty data associated with
   	a superblock. The second parameter indicates whether the method
 	should wait until the write out has been completed. Optional.
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 0bf25ee..4ebbfc3 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -262,9 +262,9 @@
 
 #
 # Allowed dirty background ratio, in percent.  Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio.  Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio.  Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
 #
 #DIRTY_BACKGROUND_RATIO=5
 
@@ -384,9 +384,9 @@
 
 #
 # Allowed dirty background ratio, in percent.  Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio.  Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio.  Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
 #
 DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
 
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 8f3ae4a..a173d2a 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -75,9 +75,10 @@
 
 There is a special folder for debugging information:
 
-#  ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# ls /sys/kernel/debug/batman_adv/bat0/
+# bla_backbone_table  log                 transtable_global
+# bla_claim_table     originators         transtable_local
+# gateways            socket              vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 6b1c711..10a015c 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -752,12 +752,22 @@
 		protocol information to generate the hash.
 
 		Uses XOR of hardware MAC addresses and IP addresses to
-		generate the hash.  The formula is
+		generate the hash.  The IPv4 formula is
 
 		(((source IP XOR dest IP) AND 0xffff) XOR
 			( source MAC XOR destination MAC ))
 				modulo slave count
 
+		The IPv6 formula is
+
+		hash = (source IP quad 2 XOR dest IP quad 2) XOR
+		       (source IP quad 3 XOR dest IP quad 3) XOR
+		       (source IP quad 4 XOR dest IP quad 4)
+
+		(((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+			XOR (source MAC XOR destination MAC))
+				modulo slave count
+
 		This algorithm will place all traffic to a particular
 		network peer on the same slave.  For non-IP traffic,
 		the formula is the same as for the layer2 transmit
@@ -778,19 +788,29 @@
 		slaves, although a single connection will not span
 		multiple slaves.
 
-		The formula for unfragmented TCP and UDP packets is
+		The formula for unfragmented IPv4 TCP and UDP packets is
 
 		((source port XOR dest port) XOR
 			 ((source IP XOR dest IP) AND 0xffff)
 				modulo slave count
 
-		For fragmented TCP or UDP packets and all other IP
-		protocol traffic, the source and destination port
+		The formula for unfragmented IPv6 TCP and UDP packets is
+
+		hash = (source port XOR dest port) XOR
+		       ((source IP quad 2 XOR dest IP quad 2) XOR
+			(source IP quad 3 XOR dest IP quad 3) XOR
+			(source IP quad 4 XOR dest IP quad 4))
+
+		((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+			modulo slave count
+
+		For fragmented TCP or UDP packets and all other IPv4 and
+		IPv6 protocol traffic, the source and destination port
 		information is omitted.  For non-IP traffic, the
 		formula is the same as for the layer2 transmit hash
 		policy.
 
-		This policy is intended to mimic the behavior of
+		The IPv4 policy is intended to mimic the behavior of
 		certain switches, notably Cisco switches with PFC2 as
 		well as some Foundry and IBM products.
 
diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt
index 8d02207..2e9e0ae2 100644
--- a/Documentation/networking/netconsole.txt
+++ b/Documentation/networking/netconsole.txt
@@ -51,8 +51,23 @@
 initialized and attempts to bring up the supplied dev at the supplied
 address.
 
-The remote host can run either 'netcat -u -l -p <port>',
-'nc -l -u <port>' or syslogd.
+The remote host has several options to receive the kernel messages,
+for example:
+
+1) syslogd
+
+2) netcat
+
+   On distributions using a BSD-based netcat version (e.g. Fedora,
+   openSUSE and Ubuntu) the listening port must be specified without
+   the -p switch:
+
+   'nc -u -l -p <port>' / 'nc -u -l <port>' or
+   'netcat -u -l -p <port>' / 'netcat -u -l <port>'
+
+3) socat
+
+   'socat udp-recv:<port> -'
 
 Dynamic reconfiguration:
 ========================
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index e40f4b4..1479aca 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -840,9 +840,9 @@
 
 static struct pinctrl_map __initdata mapping[] = {
 	PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"),
-	PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
-	PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
-	PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
+	PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
+	PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
+	PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
 };
 
 Finally, some devices expect the mapping table to contain certain specific
diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt
index e369de2..dd908cf 100644
--- a/Documentation/security/Yama.txt
+++ b/Documentation/security/Yama.txt
@@ -46,14 +46,13 @@
 so that any otherwise allowed process (even those in external pid namespaces)
 may attach.
 
-These restrictions do not change how ptrace via PTRACE_TRACEME operates.
-
-The sysctl settings are:
+The sysctl settings (writable only with CAP_SYS_PTRACE) are:
 
 0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other
     process running under the same uid, as long as it is dumpable (i.e.
     did not transition uids, start privileged, or have called
-    prctl(PR_SET_DUMPABLE...) already).
+    prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is
+    unchanged.
 
 1 - restricted ptrace: a process must have a predefined relationship
     with the inferior it wants to call PTRACE_ATTACH on. By default,
@@ -61,12 +60,13 @@
     classic criteria is also met. To change the relationship, an
     inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare
     an allowed debugger PID to call PTRACE_ATTACH on the inferior.
+    Using PTRACE_TRACEME is unchanged.
 
 2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace
-    with PTRACE_ATTACH.
+    with PTRACE_ATTACH, or through children calling PTRACE_TRACEME.
 
-3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set,
-    this sysctl cannot be changed to a lower value.
+3 - no attach: no processes may use ptrace with PTRACE_ATTACH or via
+    PTRACE_TRACEME. Once set, this sysctl value cannot be changed.
 
 The original children-only logic was based on the restrictions in grsecurity.
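At level 1, the whitelisting call mentioned above looks roughly like this from the to-be-traced process (a sketch; debugger_pid is whichever process will later call PTRACE_ATTACH):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <sys/types.h>

	int allow_debugger(pid_t debugger_pid)
	{
		if (prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0) < 0) {
			perror("prctl(PR_SET_PTRACER)");
			return -1;
		}
		return 0;
	}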
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index dcc2a94..078701f 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -76,8 +76,8 @@
 
 dirty_background_bytes
 
-Contains the amount of dirty memory at which the pdflush background writeback
-daemon will start writeback.
+Contains the amount of dirty memory at which the background kernel
+flusher threads will start writeback.
 
 Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
 one of them may be specified at a time. When one sysctl is written it is
@@ -89,7 +89,7 @@
 dirty_background_ratio
 
 Contains, as a percentage of total system memory, the number of pages at which
-the pdflush background writeback daemon will start writing out dirty data.
+the background kernel flusher threads will start writing out dirty data.
 
 ==============================================================
 
@@ -112,9 +112,9 @@
 dirty_expire_centisecs
 
 This tunable is used to define when dirty data is old enough to be eligible
-for writeout by the pdflush daemons.  It is expressed in 100'ths of a second.
-Data which has been dirty in-memory for longer than this interval will be
-written out next time a pdflush daemon wakes up.
+for writeout by the kernel flusher threads.  It is expressed in 100'ths
+of a second.  Data which has been dirty in-memory for longer than this
+interval will be written out next time a flusher thread wakes up.
 
 ==============================================================
 
@@ -128,7 +128,7 @@
 
 dirty_writeback_centisecs
 
-The pdflush writeback daemons will periodically wake up and write `old' data
+The kernel flusher threads will periodically wake up and write `old' data
 out to disk.  This tunable expresses the interval between those wakeups, in
 100'ths of a second.
 
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index f8551b3..4ac359b 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -299,11 +299,17 @@
 *******************************************************************
 
 /*
- * hugepage-shm:  see Documentation/vm/hugepage-shm.c
+ * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
  */
 
 *******************************************************************
 
 /*
- * hugepage-mmap:  see Documentation/vm/hugepage-mmap.c
+ * hugepage-shm:  see tools/testing/selftests/vm/hugepage-shm.c
+ */
+
+*******************************************************************
+
+/*
+ * hugepage-mmap:  see tools/testing/selftests/vm/hugepage-mmap.c
  */
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index 0403aaa..874a8ca 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -3,6 +3,7 @@
 
 Supported chips:
   * Maxim ds18*20 based temperature sensors.
+  * Maxim ds1825 based temperature sensors.
 
 Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 
@@ -15,6 +16,7 @@
 W1_THERM_DS18S20	0x10
 W1_THERM_DS1822		0x22
 W1_THERM_DS18B20	0x28
+W1_THERM_DS1825		0x3B
 
 Support is provided through the sysfs w1_slave file.  Each open and
 read sequence will initiate a temperature conversion then provide two
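Reading a DS1825 then works the same way as for the other chips; a hedged sketch (the 3b-... slave id is a placeholder for a real device):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/bus/w1/devices/3b-000000000000/w1_slave", "r");

		if (!f) {
			perror("w1_slave");
			return 1;
		}
		while (fgets(line, sizeof(line), f))	/* second line ends with t=<millidegrees C> */
			fputs(line, stdout);
		fclose(f);
		return 0;
	}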
diff --git a/MAINTAINERS b/MAINTAINERS
index 94b823f..fdc0119 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -827,24 +827,24 @@
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP33X ARM ARCHITECTURE
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -869,7 +869,7 @@
 
 ARM/INTEL XSC3 (MANZANO) ARM CORE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -925,14 +925,14 @@
 
 ARM/NOMADIK ARCHITECTURE
 M:	Alessandro Rubini <rubini@unipv.it>
-M:	Linus Walleij <linus.walleij@stericsson.com>
+M:	Linus Walleij <linus.walleij@linaro.org>
 M:	STEricsson <STEricsson_nomadik_linux@list.st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-nomadik/
 F:	arch/arm/plat-nomadik/
 F:	drivers/i2c/busses/i2c-nomadik.c
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 
 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 M:	Nelson Castillo <arhuaco@freaks-unidos.net>
@@ -1146,7 +1146,7 @@
 F:	drivers/video/nuc900fb.c
 
 ARM/U300 MACHINE SUPPORT
-M:	Linus Walleij <linus.walleij@stericsson.com>
+M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	arch/arm/mach-u300/
@@ -1161,15 +1161,20 @@
 
 ARM/Ux500 ARM ARCHITECTURE
 M:	Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
-M:	Linus Walleij <linus.walleij@stericsson.com>
+M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-ux500/
+F:	drivers/clocksource/clksrc-dbx500-prcmu.c
 F:	drivers/dma/ste_dma40*
+F:	drivers/hwspinlock/u8500_hsem.c
 F:	drivers/mfd/abx500*
 F:	drivers/mfd/ab8500*
-F:	drivers/mfd/stmpe*
+F:	drivers/mfd/dbx500*
+F:	drivers/mfd/db8500*
+F:	drivers/pinctrl/pinctrl-nomadik*
 F:	drivers/rtc/rtc-ab8500.c
+F:	drivers/rtc/rtc-pl031.c
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/VFP SUPPORT
@@ -1227,9 +1232,9 @@
 F:	drivers/hwmon/asb100.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 W:	http://sourceforge.net/projects/xscaleiop
-S:	Supported
+S:	Maintained
 F:	Documentation/crypto/async-tx-api.txt
 F:	crypto/async_tx/
 F:	drivers/dma/
@@ -2212,7 +2217,7 @@
 F:	drivers/scsi/tmscsim.*
 
 DC395x SCSI driver
-M:	Oliver Neukum <oliver@neukum.name>
+M:	Oliver Neukum <oliver@neukum.org>
 M:	Ali Akcaagac <aliakc@web.de>
 M:	Jamie Lenehan <lenehan@twibble.org>
 W:	http://twibble.org/dist/dc395x/
@@ -2359,7 +2364,7 @@
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
-M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dan Williams <djbw@fb.com>
 S:	Supported
 F:	drivers/dma/
 F:	include/linux/dma*
@@ -3094,7 +3099,7 @@
 
 GPIO SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
-M:	Linus Walleij <linus.walleij@stericsson.com>
+M:	Linus Walleij <linus.walleij@linaro.org>
 S:	Maintained
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 F:	Documentation/gpio.txt
@@ -3547,7 +3552,6 @@
 
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:	Intel SCU Linux support <intel-linux-scu@intel.com>
-M:	Dan Williams <dan.j.williams@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
 M:	Ed Nadolski <edmund.nadolski@intel.com>
 L:	linux-scsi@vger.kernel.org
@@ -3590,8 +3594,8 @@
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Dan Williams <dan.j.williams@intel.com>
-S:	Supported
+M:	Dan Williams <djbw@fb.com>
+S:	Maintained
 F:	drivers/dma/ioat*
 
 INTEL IOMMU (VT-d)
@@ -3603,8 +3607,8 @@
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
-M:	Dan Williams <dan.j.williams@intel.com>
-S:	Maintained
+M:	Dan Williams <djbw@fb.com>
+S:	Odd fixes
 F:	drivers/dma/iop-adma.c
 
 INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
@@ -4533,7 +4537,7 @@
 F:	arch/microblaze/
 
 MICROTEK X6 SCANNER
-M:	Oliver Neukum <oliver@neukum.name>
+M:	Oliver Neukum <oliver@neukum.org>
 S:	Maintained
 F:	drivers/usb/image/microtek.*
 
@@ -5329,14 +5333,15 @@
 M:	Linus Walleij <linus.walleij@linaro.org>
 S:	Maintained
 F:	drivers/pinctrl/
+F:	include/linux/pinctrl/
 
 PIN CONTROLLER - ST SPEAR
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:	Viresh Kumar <viresh.linux@gmail.com>
 L:	spear-devel@list.st.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.st.com/spear
 S:	Maintained
-F:	driver/pinctrl/spear/
+F:	drivers/pinctrl/spear/
 
 PKTCDVD DRIVER
 M:	Peter Osterlund <petero2@telia.com>
@@ -7071,7 +7076,7 @@
 F:	include/mtd/ubi-user.h
 
 USB ACM DRIVER
-M:	Oliver Neukum <oliver@neukum.name>
+M:	Oliver Neukum <oliver@neukum.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	Documentation/usb/acm.txt
@@ -7092,7 +7097,7 @@
 F:	drivers/block/ub.c
 
 USB CDC ETHERNET DRIVER
-M:	Oliver Neukum <oliver@neukum.name>
+M:	Oliver Neukum <oliver@neukum.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/net/usb/cdc_*.c
@@ -7165,7 +7170,7 @@
 F:	include/linux/usb/isp116x.h
 
 USB KAWASAKI LSI DRIVER
-M:	Oliver Neukum <oliver@neukum.name>
+M:	Oliver Neukum <oliver@neukum.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/usb/serial/kl5kusb105.*
@@ -7283,6 +7288,12 @@
 S:	Supported
 F:	drivers/usb/serial/whiteheat*
 
+USB SMSC75XX ETHERNET DRIVER
+M:	Steve Glendinning <steve.glendinning@shawell.net>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/usb/smsc75xx.*
+
 USB SMSC95XX ETHERNET DRIVER
 M:	Steve Glendinning <steve.glendinning@shawell.net>
 L:	netdev@vger.kernel.org
@@ -7665,23 +7676,28 @@
 F:	Documentation/hwmon/wm83??
 F:	arch/arm/mach-s3c64xx/mach-crag6410*
 F:	drivers/clk/clk-wm83*.c
+F:	drivers/extcon/extcon-arizona.c
 F:	drivers/leds/leds-wm83*.c
 F:	drivers/gpio/gpio-*wm*.c
+F:	drivers/gpio/gpio-arizona.c
 F:	drivers/hwmon/wm83??-hwmon.c
 F:	drivers/input/misc/wm831x-on.c
 F:	drivers/input/touchscreen/wm831x-ts.c
 F:	drivers/input/touchscreen/wm97*.c
-F:	drivers/mfd/wm8*.c
+F:	drivers/mfd/arizona*
+F:	drivers/mfd/wm*.c
 F:	drivers/power/wm83*.c
 F:	drivers/rtc/rtc-wm83*.c
 F:	drivers/regulator/wm8*.c
 F:	drivers/video/backlight/wm83*_bl.c
 F:	drivers/watchdog/wm83*_wdt.c
+F:	include/linux/mfd/arizona/
 F:	include/linux/mfd/wm831x/
 F:	include/linux/mfd/wm8350/
 F:	include/linux/mfd/wm8400*
 F:	include/linux/wm97xx.h
 F:	include/sound/wm????.h
+F:	sound/soc/codecs/arizona.?
 F:	sound/soc/codecs/wm*
 
 WORKQUEUE
diff --git a/Makefile b/Makefile
index ddf5be9..9cc77ac 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d5b9b5e..9944ded 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,6 +18,8 @@
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 3bb7ffea..c2cbe4f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
  */
 
 
-#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i)		{ (i) }
+#define ATOMIC64_INIT(i)	{ (i) }
 
 #define atomic_read(v)		(*(volatile int *)&(v)->counter)
 #define atomic64_read(v)	(*(volatile long *)&(v)->counter)
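The dropped casts matter because ( (atomic_t) { (i) } ) is a compound literal, which is not a constant expression, so the old form could break static initializers; with plain braces the usual idiom works (a trivial illustration, not code from this patch):

	#include <linux/atomic.h>

	static atomic_t users = ATOMIC_INIT(0);		/* now a valid static initializer */
	static atomic64_t bytes = ATOMIC64_INIT(0);

	void account_user(long n)
	{
		atomic_inc(&users);
		atomic64_add(n, &bytes);
	}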
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index db00f78..e477bcd 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,7 +1,9 @@
 #ifndef __ASM_ALPHA_FPU_H
 #define __ASM_ALPHA_FPU_H
 
+#ifdef __KERNEL__
 #include <asm/special_insns.h>
+#endif
 
 /*
  * Alpha floating-point control register defines:
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index fd698a1..b87755a 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -76,7 +76,10 @@
 #define task_pt_regs(task) \
   ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
 
-#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
+#define current_pt_regs() \
+  ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
 
 #endif
 
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index dcb221a..7d2f75b 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -76,9 +76,11 @@
 /* Instruct lower device to use last 4-bytes of skb data as FCS */
 #define SO_NOFCS		43
 
+#ifdef __KERNEL__
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
 #define SOCK_NONBLOCK	0x40000000
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b49ec2f..766fdfd 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -433,36 +433,12 @@
 #undef __module_address
 #undef __module_call
 
-/* Returns: -EFAULT if exception before terminator, N if the entire
-   buffer filled, else strlen.  */
+#define user_addr_max() \
+        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
 
-extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
-
-extern inline long
-strncpy_from_user(char *to, const char __user *from, long n)
-{
-	long ret = -EFAULT;
-	if (__access_ok((unsigned long)from, 0, get_fs()))
-		ret = __strncpy_from_user(to, from, n);
-	return ret;
-}
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern long __strlen_user(const char __user *);
-
-extern inline long strlen_user(const char __user *str)
-{
-	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
-}
-
-/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
- * a value greater than N if the limit would be exceeded, else strlen.  */
-extern long __strnlen_user(const char __user *, long);
-
-extern inline long strnlen_user(const char __user *str, long n)
-{
-	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 /*
  * About the exception table:
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 633b23b..a31a78e 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -465,10 +465,12 @@
 #define __NR_setns			501
 #define __NR_accept4			502
 #define __NR_sendmmsg			503
+#define __NR_process_vm_readv		504
+#define __NR_process_vm_writev		505
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS			504
+#define NR_SYSCALLS			506
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..6b340d0
--- /dev/null
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/compiler.h>
+
+/*
+ * word-at-a-time interface for Alpha.
+ */
+
+/*
+ * We do not use the word_at_a_time struct on Alpha, but it needs to be
+ * implemented to humour the generic code.
+ */
+struct word_at_a_time {
+	const unsigned long unused;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { 0 }
+
+/* Return nonzero if val has a zero */
+static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long zero_locations = __kernel_cmpbge(0, val);
+	*bits = zero_locations;
+	return zero_locations;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long find_zero(unsigned long bits)
+{
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
+	/* Simple if have CIX instructions */
+	return __kernel_cttz(bits);
+#else
+	unsigned long t1, t2, t3;
+	/* Retain lowest set bit only */
+	bits &= -bits;
+	/* Binary search for lowest set bit */
+	t1 = bits & 0xf0;
+	t2 = bits & 0xcc;
+	t3 = bits & 0xaa;
+	if (t1) t1 = 4;
+	if (t2) t2 = 2;
+	if (t3) t3 = 1;
+	return t1 + t2 + t3;
+#endif
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
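For context, the generic string helpers selected in the Kconfig hunk above drive this interface roughly as follows (a sketch of the pattern used by the generic code, not a verbatim copy of it):

	/* Return the byte index of the first NUL in 'word', or -1 if none. */
	static inline long find_nul_in_word(unsigned long word)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		unsigned long data;

		if (has_zero(word, &data, &constants)) {
			data = prep_zero_mask(word, data, &constants);
			data = create_zero_mask(data);
			return find_zero(data);
		}
		return -1;
	}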
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index d96e742..15fa821 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -52,7 +52,6 @@
 
 /* entry.S */
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(kernel_execve);
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -74,8 +73,6 @@
  */
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
 
 /* 
  * SMP-specific symbols.
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 6d159ce..ec0da05 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -663,58 +663,6 @@
 	br	ret_to_kernel
 .end kernel_thread
 
-/*
- * kernel_execve(path, argv, envp)
- */
-	.align	4
-	.globl	kernel_execve
-	.ent	kernel_execve
-kernel_execve:
-	/* We can be called from a module.  */
-	ldgp	$gp, 0($27)
-	lda	$sp, -(32+SIZEOF_PT_REGS+8)($sp)
-	.frame	$sp, 32+SIZEOF_PT_REGS+8, $26, 0
-	stq	$26, 0($sp)
-	stq	$16, 8($sp)
-	stq	$17, 16($sp)
-	stq	$18, 24($sp)
-	.prologue 1
-
-	lda	$16, 32($sp)
-	lda	$17, 0
-	lda	$18, SIZEOF_PT_REGS
-	bsr	$26, memset		!samegp
-
-	/* Avoid the HAE being gratuitously wrong, which would cause us
-	   to do the whole turn off interrupts thing and restore it.  */
-	ldq	$2, alpha_mv+HAE_CACHE
-	stq	$2, 152+32($sp)
-
-	ldq	$16, 8($sp)
-	ldq	$17, 16($sp)
-	ldq	$18, 24($sp)
-	lda	$19, 32($sp)
-	bsr	$26, do_execve		!samegp
-
-	ldq	$26, 0($sp)
-	bne	$0, 1f			/* error! */
-
-	/* Move the temporary pt_regs struct from its current location
-	   to the top of the kernel stack frame.  See copy_thread for
-	   details for a normal process.  */
-	lda	$16, 0x4000 - SIZEOF_PT_REGS($8)
-	lda	$17, 32($sp)
-	lda	$18, SIZEOF_PT_REGS
-	bsr	$26, memmove		!samegp
-
-	/* Take that over as our new stack frame and visit userland!  */
-	lda	$sp, 0x4000 - SIZEOF_PT_REGS($8)
-	br	$31, ret_from_sys_call
-
-1:	lda	$sp, 32+SIZEOF_PT_REGS+8($sp)
-	ret
-.end kernel_execve
-
 
 /*
  * Special system calls.  Most of these are special in that they either
@@ -797,115 +745,6 @@
 .end sys_rt_sigreturn
 
 	.align	4
-	.globl	sys_sethae
-	.ent	sys_sethae
-sys_sethae:
-	.prologue 0
-	stq	$16, 152($sp)
-	ret
-.end sys_sethae
-
-	.align	4
-	.globl	osf_getpriority
-	.ent	osf_getpriority
-osf_getpriority:
-	lda	$sp, -16($sp)
-	stq	$26, 0($sp)
-	.prologue 0
-
-	jsr	$26, sys_getpriority
-
-	ldq	$26, 0($sp)
-	blt	$0, 1f
-
-	/* Return value is the unbiased priority, i.e. 20 - prio.
-	   This does result in negative return values, so signal
-	   no error by writing into the R0 slot.  */
-	lda	$1, 20
-	stq	$31, 16($sp)
-	subl	$1, $0, $0
-	unop
-
-1:	lda	$sp, 16($sp)
-	ret
-.end osf_getpriority
-
-	.align	4
-	.globl	sys_getxuid
-	.ent	sys_getxuid
-sys_getxuid:
-	.prologue 0
-	ldq	$2, TI_TASK($8)
-	ldq	$3, TASK_CRED($2)
-	ldl	$0, CRED_UID($3)
-	ldl	$1, CRED_EUID($3)
-	stq	$1, 80($sp)
-	ret
-.end sys_getxuid
-
-	.align	4
-	.globl	sys_getxgid
-	.ent	sys_getxgid
-sys_getxgid:
-	.prologue 0
-	ldq	$2, TI_TASK($8)
-	ldq	$3, TASK_CRED($2)
-	ldl	$0, CRED_GID($3)
-	ldl	$1, CRED_EGID($3)
-	stq	$1, 80($sp)
-	ret
-.end sys_getxgid
-
-	.align	4
-	.globl	sys_getxpid
-	.ent	sys_getxpid
-sys_getxpid:
-	.prologue 0
-	ldq	$2, TI_TASK($8)
-
-	/* See linux/kernel/timer.c sys_getppid for discussion
-	   about this loop.  */
-	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
-	ldl	$0, TASK_TGID($2)
-1:	ldl	$1, TASK_TGID($4)
-#ifdef CONFIG_SMP
-	mov	$4, $5
-	mb
-	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
-	cmpeq	$4, $5, $5
-	beq	$5, 1b
-#endif
-	stq	$1, 80($sp)
-	ret
-.end sys_getxpid
-
-	.align	4
-	.globl	sys_alpha_pipe
-	.ent	sys_alpha_pipe
-sys_alpha_pipe:
-	lda	$sp, -16($sp)
-	stq	$26, 0($sp)
-	.prologue 0
-
-	mov	$31, $17
-	lda	$16, 8($sp)
-	jsr	$26, do_pipe_flags
-
-	ldq	$26, 0($sp)
-	bne	$0, 1f
-
-	/* The return values are in $0 and $20.  */
-	ldl	$1, 12($sp)
-	ldl	$0, 8($sp)
-
-	stq	$1, 80+16($sp)
-1:	lda	$sp, 16($sp)
-	ret
-.end sys_alpha_pipe
-
-	.align	4
 	.globl	sys_execve
 	.ent	sys_execve
 sys_execve:
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 98a1036..bc1acdd 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1404,3 +1404,52 @@
 }
 
 #endif
+
+SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
+{
+	int prio = sys_getpriority(which, who);
+	if (prio >= 0) {
+		/* Return value is the unbiased priority, i.e. 20 - prio.
+		   This does result in negative return values, so signal
+		   no error */
+		force_successful_syscall_return();
+		prio = 20 - prio;
+	}
+	return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+	current_pt_regs()->r20 = sys_geteuid();
+	return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+	current_pt_regs()->r20 = sys_getegid();
+	return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+	current_pt_regs()->r20 = sys_getppid();
+	return sys_getpid();
+}
+
+SYSCALL_DEFINE0(alpha_pipe)
+{
+	int fd[2];
+	int res = do_pipe_flags(fd, 0);
+	if (!res) {
+		/* The return values are in $0 and $20.  */
+		current_pt_regs()->r20 = fd[1];
+		res = fd[0];
+	}
+	return res;
+}
+
+SYSCALL_DEFINE1(sethae, unsigned long, val)
+{
+	current_pt_regs()->hae = val;
+	return 0;
+}
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fc..d6fde98 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@
 	}
 	return pc;
 }
+
+int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
+{
+	/* Avoid the HAE being gratuitously wrong, which would cause us
+	   to do the whole turn off interrupts thing and restore it.  */
+	struct pt_regs regs = {.hae = alpha_mv.hae_cache};
+	int err = do_execve(path, argv, envp, &regs);
+	if (!err) {
+		struct pt_regs *p = current_pt_regs();
+		/* copy regs to normal position and off to userland we go... */
+		*p = regs;
+		__asm__ __volatile__ (
+			"mov	%0, $sp;"
+			"br	$31, ret_from_sys_call"
+			: : "r"(p));
+	}
+	return err;
+}
+EXPORT_SYMBOL(kernel_execve);
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 8783523..2ac6b45 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@
 	.quad sys_socket
 	.quad sys_connect
 	.quad sys_accept
-	.quad osf_getpriority			/* 100 */
+	.quad sys_osf_getpriority			/* 100 */
 	.quad sys_send
 	.quad sys_recv
 	.quad sys_sigreturn
@@ -522,6 +522,8 @@
 	.quad sys_setns
 	.quad sys_accept4
 	.quad sys_sendmmsg
+	.quad sys_process_vm_readv
+	.quad sys_process_vm_writev		/* 505 */
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index c0a83ab..5966074 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@
 	$(ev6-y)memchr.o \
 	$(ev6-y)copy_user.o \
 	$(ev6-y)clear_user.o \
-	$(ev6-y)strncpy_from_user.o \
-	$(ev67-y)strlen_user.o \
 	$(ev6-y)csum_ipv6_magic.o \
 	$(ev6-y)clear_page.o \
 	$(ev6-y)copy_page.o \
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e2817..0000000
--- a/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * arch/alpha/lib/ev6-strncpy_from_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT       if an exception occurs before the terminator is copied.
- * N             if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- *	Compiler Writer's Guide for the Alpha 21264
- *	abbreviated as 'CWG' in other comments here
- *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- *	E	- either cluster
- *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * A bunch of instructions got moved and temp registers were changed
- * to aid in scheduling.  Control flow was also re-arranged to eliminate
- * branches, and to provide longer code sequences to enable better scheduling.
- * A total rewrite (using byte load/stores for start & tail sequences)
- * is desirable, but very difficult to do without a from-scratch rewrite.
- * Save that for the future.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one.  */
-#define EX(x,y...)			\
-	99: x,##y;			\
-	.section __ex_table,"a";	\
-	.long 99b - .;			\
-	lda $31, $exception-99b($0); 	\
-	.previous
-
-
-	.set noat
-	.set noreorder
-	.text
-
-	.globl __strncpy_from_user
-	.ent __strncpy_from_user
-	.frame $30, 0, $26
-	.prologue 0
-
-	.align 4
-__strncpy_from_user:
-	and	a0, 7, t3	# E : find dest misalignment
-	beq	a2, $zerolength	# U :
-
-	/* Are source and destination co-aligned?  */
-	mov	a0, v0		# E : save the string start
-	xor	a0, a1, t4	# E :
-	EX( ldq_u t1, 0(a1) )	# L : Latency=3 load first quadword
-	ldq_u	t0, 0(a0)	# L : load first (partial) aligned dest quadword
-
-	addq	a2, t3, a2	# E : bias count by dest misalignment
-	subq	a2, 1, a3	# E :
-	addq	zero, 1, t10	# E :
-	and	t4, 7, t4	# E : misalignment between the two
-
-	and	a3, 7, t6	# E : number of tail bytes
-	sll	t10, t6, t10	# E : t10 = bitmask of last count byte
-	bne	t4, $unaligned	# U :
-	lda	t2, -1		# E : build a mask against false zero
-
-	/*
-	 * We are co-aligned; take care of a partial first word.
-	 * On entry to this basic block:
-	 * t0 == the first destination word for masking back in
-	 * t1 == the first source word.
-	 */
-
-	srl	a3, 3, a2	# E : a2 = loop counter = (count - 1)/8
-	addq	a1, 8, a1	# E :
-	mskqh	t2, a1, t2	# U :   detection in the src word
-	nop
-
-	/* Create the 1st output word and detect 0's in the 1st input word.  */
-	mskqh	t1, a1, t3	# U :
-	mskql	t0, a1, t0	# U : assemble the first output word
-	ornot	t1, t2, t2	# E :
-	nop
-
-	cmpbge	zero, t2, t8	# E : bits set iff null found
-	or	t0, t3, t0	# E :
-	beq	a2, $a_eoc	# U :
-	bne	t8, $a_eos	# U : 2nd branch in a quad.  Bad.
-
-	/* On entry to this basic block:
-	 * t0 == a source quad not containing a null.
-	 * a0 - current aligned destination address
-	 * a1 - current aligned source address
-	 * a2 - count of quadwords to move.
-	 * NOTE: Loop improvement - unrolling this is going to be
-	 *	a huge win, since we're going to stall otherwise.
-	 *	Fix this later.  For _really_ large copies, look
-	 *	at using wh64 on a look-ahead basis.  See the code
-	 *	in clear_user.S and copy_user.S.
-	 * Presumably, since (a0) and (a1) do not overlap (by C definition)
-	 * Lots of nops here:
-	 *	- Separate loads from stores
-	 *	- Keep it to 1 branch/quadpack so the branch predictor
-	 *	  can train.
-	 */
-$a_loop:
-	stq_u	t0, 0(a0)	# L :
-	addq	a0, 8, a0	# E :
-	nop
-	subq	a2, 1, a2	# E :
-
-	EX( ldq_u t0, 0(a1) )	# L :
-	addq	a1, 8, a1	# E :
-	cmpbge	zero, t0, t8	# E : Stall 2 cycles on t0
-	beq	a2, $a_eoc      # U :
-
-	beq	t8, $a_loop	# U :
-	nop
-	nop
-	nop
-
-	/* Take care of the final (partial) word store.  At this point
-	 * the end-of-count bit is set in t8 iff it applies.
-	 *
-	 * On entry to this basic block we have:
-	 * t0 == the source word containing the null
-	 * t8 == the cmpbge mask that found it.
-	 */
-$a_eos:
-	negq	t8, t12		# E : find low bit set
-	and	t8, t12, t12	# E : 
-
-	/* We're doing a partial word store and so need to combine
-	   our source and original destination words.  */
-	ldq_u	t1, 0(a0)	# L :
-	subq	t12, 1, t6	# E :
-
-	or	t12, t6, t8	# E :
-	zapnot	t0, t8, t0	# U : clear src bytes > null
-	zap	t1, t8, t1	# U : clear dst bytes <= null
-	or	t0, t1, t0	# E :
-
-	stq_u	t0, 0(a0)	# L :
-	br	$finish_up	# L0 :
-	nop
-	nop
-
-	/* Add the end-of-count bit to the eos detection bitmask.  */
-	.align 4
-$a_eoc:
-	or	t10, t8, t8
-	br	$a_eos
-	nop
-	nop
-
-
-/* The source and destination are not co-aligned.  Align the destination
-   and cope.  We have to be very careful about not reading too much and
-   causing a SEGV.  */
-
-	.align 4
-$u_head:
-	/* We know just enough now to be able to assemble the first
-	   full source word.  We can still find a zero at the end of it
-	   that prevents us from outputting the whole thing.
-
-	   On entry to this basic block:
-	   t0 == the first dest word, unmasked
-	   t1 == the shifted low bits of the first source word
-	   t6 == bytemask that is -1 in dest word bytes */
-
-	EX( ldq_u t2, 8(a1) )	# L : load second src word
-	addq	a1, 8, a1	# E :
-	mskql	t0, a0, t0	# U : mask trailing garbage in dst
-	extqh	t2, a1, t4	# U :
-
-	or	t1, t4, t1	# E : first aligned src word complete
-	mskqh	t1, a0, t1	# U : mask leading garbage in src
-	or	t0, t1, t0	# E : first output word complete
-	or	t0, t6, t6	# E : mask original data for zero test
-
-	cmpbge	zero, t6, t8	# E :
-	beq	a2, $u_eocfin	# U :
-	bne	t8, $u_final	# U : bad news - 2nd branch in a quad
-	lda	t6, -1		# E : mask out the bits we have
-
-	mskql	t6, a1, t6	# U :   already seen
-	stq_u	t0, 0(a0)	# L : store first output word
-	or      t6, t2, t2	# E :
-	cmpbge	zero, t2, t8	# E : find nulls in second partial
-
-	addq	a0, 8, a0		# E :
-	subq	a2, 1, a2		# E :
-	bne	t8, $u_late_head_exit	# U :
-	nop
-
-	/* Finally, we've got all the stupid leading edge cases taken care
-	   of and we can set up to enter the main loop.  */
-
-	extql	t2, a1, t1	# U : position hi-bits of lo word
-	EX( ldq_u t2, 8(a1) )	# L : read next high-order source word
-	addq	a1, 8, a1	# E :
-	cmpbge	zero, t2, t8	# E :
-
-	beq	a2, $u_eoc	# U :
-	bne	t8, $u_eos	# U :
-	nop
-	nop
-
-	/* Unaligned copy main loop.  In order to avoid reading too much,
-	   the loop is structured to detect zeros in aligned source words.
-	   This has, unfortunately, effectively pulled half of a loop
-	   iteration out into the head and half into the tail, but it does
-	   prevent nastiness from accumulating in the very thing we want
-	   to run as fast as possible.
-
-	   On entry to this basic block:
-	   t1 == the shifted high-order bits from the previous source word
-	   t2 == the unshifted current source word
-
-	   We further know that t2 does not contain a null terminator.  */
-
-	/*
-	 * Extra nops here:
-	 *	separate load quads from store quads
-	 *	only one branch/quad to permit predictor training
-	 */
-
-	.align 4
-$u_loop:
-	extqh	t2, a1, t0	# U : extract high bits for current word
-	addq	a1, 8, a1	# E :
-	extql	t2, a1, t3	# U : extract low bits for next time
-	addq	a0, 8, a0	# E :
-
-	or	t0, t1, t0	# E : current dst word now complete
-	EX( ldq_u t2, 0(a1) )	# L : load high word for next time
-	subq	a2, 1, a2	# E :
-	nop
-
-	stq_u	t0, -8(a0)	# L : save the current word
-	mov	t3, t1		# E :
-	cmpbge	zero, t2, t8	# E : test new word for eos
-	beq	a2, $u_eoc	# U :
-
-	beq	t8, $u_loop	# U :
-	nop
-	nop
-	nop
-
-	/* We've found a zero somewhere in the source word we just read.
-	   If it resides in the lower half, we have one (probably partial)
-	   word to write out, and if it resides in the upper half, we
-	   have one full and one partial word left to write out.
-
-	   On entry to this basic block:
-	   t1 == the shifted high-order bits from the previous source word
-	   t2 == the unshifted current source word.  */
-	.align 4
-$u_eos:
-	extqh	t2, a1, t0	# U :
-	or	t0, t1, t0	# E : first (partial) source word complete
-	cmpbge	zero, t0, t8	# E : is the null in this first bit?
-	nop
-
-	bne	t8, $u_final	# U :
-	stq_u	t0, 0(a0)	# L : the null was in the high-order bits
-	addq	a0, 8, a0	# E :
-	subq	a2, 1, a2	# E :
-
-	.align 4
-$u_late_head_exit:
-	extql	t2, a1, t0	# U :
-	cmpbge	zero, t0, t8	# E :
-	or	t8, t10, t6	# E :
-	cmoveq	a2, t6, t8	# E :
-
-	/* Take care of a final (probably partial) result word.
-	   On entry to this basic block:
-	   t0 == assembled source word
-	   t8 == cmpbge mask that found the null.  */
-	.align 4
-$u_final:
-	negq	t8, t6		# E : isolate low bit set
-	and	t6, t8, t12	# E :
-	ldq_u	t1, 0(a0)	# L :
-	subq	t12, 1, t6	# E :
-
-	or	t6, t12, t8	# E :
-	zapnot	t0, t8, t0	# U : kill source bytes > null
-	zap	t1, t8, t1	# U : kill dest bytes <= null
-	or	t0, t1, t0	# E :
-
-	stq_u	t0, 0(a0)	# E :
-	br	$finish_up	# U :
-	nop
-	nop
-
-	.align 4
-$u_eoc:				# end-of-count
-	extqh	t2, a1, t0	# U :
-	or	t0, t1, t0	# E :
-	cmpbge	zero, t0, t8	# E :
-	nop
-
-	.align 4
-$u_eocfin:			# end-of-count, final word
-	or	t10, t8, t8	# E :
-	br	$u_final	# U :
-	nop
-	nop
-
-	/* Unaligned copy entry point.  */
-	.align 4
-$unaligned:
-
-	srl	a3, 3, a2	# U : a2 = loop counter = (count - 1)/8
-	and	a0, 7, t4	# E : find dest misalignment
-	and	a1, 7, t5	# E : find src misalignment
-	mov	zero, t0	# E :
-
-	/* Conditionally load the first destination word and a bytemask
-	   with 0xff indicating that the destination byte is sacrosanct.  */
-
-	mov	zero, t6	# E :
-	beq	t4, 1f		# U :
-	ldq_u	t0, 0(a0)	# L :
-	lda	t6, -1		# E :
-
-	mskql	t6, a0, t6	# E :
-	nop
-	nop
-	nop
-
-	.align 4
-1:
-	subq	a1, t4, a1	# E : sub dest misalignment from src addr
-	/* If source misalignment is larger than dest misalignment, we need
-	   extra startup checks to avoid SEGV.  */
-	cmplt	t4, t5, t12	# E :
-	extql	t1, a1, t1	# U : shift src into place
-	lda	t2, -1		# E : for creating masks later
-
-	beq	t12, $u_head	# U :
-	mskqh	t2, t5, t2	# U : begin src byte validity mask
-	cmpbge	zero, t1, t8	# E : is there a zero?
-	nop
-
-	extql	t2, a1, t2	# U :
-	or	t8, t10, t5	# E : test for end-of-count too
-	cmpbge	zero, t2, t3	# E :
-	cmoveq	a2, t5, t8	# E : Latency=2, extra map slot
-
-	nop			# E : goes with cmov
-	andnot	t8, t3, t8	# E :
-	beq	t8, $u_head	# U :
-	nop
-
-	/* At this point we've found a zero in the first partial word of
-	   the source.  We need to isolate the valid source data and mask
-	   it into the original destination data.  (Incidentally, we know
-	   that we'll need at least one byte of that original dest word.) */
-
-	ldq_u	t0, 0(a0)	# L :
-	negq	t8, t6		# E : build bitmask of bytes <= zero
-	mskqh	t1, t4, t1	# U :
-	and	t6, t8, t12	# E :
-
-	subq	t12, 1, t6	# E :
-	or	t6, t12, t8	# E :
-	zapnot	t2, t8, t2	# U : prepare source word; mirror changes
-	zapnot	t1, t8, t1	# U : to source validity mask
-
-	andnot	t0, t2, t0	# E : zero place for source to reside
-	or	t0, t1, t0	# E : and put it there
-	stq_u	t0, 0(a0)	# L :
-	nop
-
-	.align 4
-$finish_up:
-	zapnot	t0, t12, t4	# U : was last byte written null?
-	and	t12, 0xf0, t3	# E : binary search for the address of the
-	cmovne	t4, 1, t4	# E : Latency=2, extra map slot
-	nop			# E : with cmovne
-
-	and	t12, 0xcc, t2	# E : last byte written
-	and	t12, 0xaa, t1	# E :
-	cmovne	t3, 4, t3	# E : Latency=2, extra map slot
-	nop			# E : with cmovne
-
-	bic	a0, 7, t0
-	cmovne	t2, 2, t2	# E : Latency=2, extra map slot
-	nop			# E : with cmovne
-	nop
-
-	cmovne	t1, 1, t1	# E : Latency=2, extra map slot
-	nop			# E : with cmovne
-	addq	t0, t3, t0	# E :
-	addq	t1, t2, t1	# E :
-
-	addq	t0, t1, t0	# E :
-	addq	t0, t4, t0	# add one if we filled the buffer
-	subq	t0, v0, v0	# find string length
-	ret			# L0 :
-
-	.align 4
-$zerolength:
-	nop
-	nop
-	nop
-	clr	v0
-
-$exception:
-	nop
-	nop
-	nop
-	ret
-
-	.end __strncpy_from_user
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
deleted file mode 100644
index 57e0d77..0000000
--- a/arch/alpha/lib/ev67-strlen_user.S
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * arch/alpha/lib/ev67-strlen_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
- *
- * Return the length of the string including the NULL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- *      Compiler Writer's Guide for the Alpha 21264
- *      abbreviated as 'CWG' in other comments here
- *      ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- *      E       - either cluster
- *      U       - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- *      L       - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * Try not to change the actual algorithm if possible for consistency.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one.  */
-#define EX(x,y...)			\
-	99: x,##y;			\
-	.section __ex_table,"a";	\
-	.long 99b - .;			\
-	lda v0, $exception-99b(zero);	\
-	.previous
-
-
-	.set noreorder
-	.set noat
-	.text
-
-	.globl __strlen_user
-	.ent __strlen_user
-	.frame sp, 0, ra
-
-	.align 4
-__strlen_user:
-	ldah	a1, 32767(zero)	# do not use plain strlen_user() for strings
-				# that might be almost 2 GB long; you should
-				# be using strnlen_user() instead
-	nop
-	nop
-	nop
-
-	.globl __strnlen_user
-
-	.align 4
-__strnlen_user:
-	.prologue 0
-	EX( ldq_u t0, 0(a0) )	# L : load first quadword (a0 may be misaligned)
-	lda     t1, -1(zero)	# E :
-
-	insqh   t1, a0, t1	# U :
-	andnot  a0, 7, v0	# E :
-	or      t1, t0, t0	# E :
-	subq	a0, 1, a0	# E : get our +1 for the return 
-
-	cmpbge  zero, t0, t1	# E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
-	subq	a1, 7, t2	# E :
-	subq	a0, v0, t0	# E :
-	bne     t1, $found	# U :
-
-	addq	t2, t0, t2	# E :
-	addq	a1, 1, a1	# E :
-	nop			# E :
-	nop			# E :
-
-	.align 4
-$loop:	ble	t2, $limit	# U :
-	EX( ldq t0, 8(v0) )	# L :
-	nop			# E :
-	nop			# E :
-
-	cmpbge  zero, t0, t1	# E :
-	subq	t2, 8, t2	# E :
-	addq    v0, 8, v0	# E : addr += 8
-	beq     t1, $loop	# U :
-
-$found: cttz	t1, t2		# U0 :
-	addq	v0, t2, v0	# E :
-	subq    v0, a0, v0	# E :
-	ret			# L0 :
-
-$exception:
-	nop
-	nop
-	nop
-	ret
-
-	.align 4		# currently redundant
-$limit:
-	nop
-	nop
-	subq	a1, t2, v0
-	ret
-
-	.end __strlen_user
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
deleted file mode 100644
index 508a18e..0000000
--- a/arch/alpha/lib/strlen_user.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * arch/alpha/lib/strlen_user.S
- *
- * Return the length of the string including the NUL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one.  */
-#define EX(x,y...)			\
-	99: x,##y;			\
-	.section __ex_table,"a";	\
-	.long 99b - .;			\
-	lda v0, $exception-99b(zero);	\
-	.previous
-
-
-	.set noreorder
-	.set noat
-	.text
-
-	.globl __strlen_user
-	.ent __strlen_user
-	.frame sp, 0, ra
-
-	.align 3
-__strlen_user:
-	ldah	a1, 32767(zero)	# do not use plain strlen_user() for strings
-				# that might be almost 2 GB long; you should
-				# be using strnlen_user() instead
-
-	.globl __strnlen_user
-
-	.align 3
-__strnlen_user:
-	.prologue 0
-
-	EX( ldq_u t0, 0(a0) )	# load first quadword (a0 may be misaligned)
-	lda     t1, -1(zero)
-	insqh   t1, a0, t1
-	andnot  a0, 7, v0
-	or      t1, t0, t0
-	subq	a0, 1, a0	# get our +1 for the return 
-	cmpbge  zero, t0, t1	# t1 <- bitmask: bit i == 1 <==> i-th byte == 0
-	subq	a1, 7, t2
-	subq	a0, v0, t0
-	bne     t1, $found
-
-	addq	t2, t0, t2
-	addq	a1, 1, a1
-
-	.align 3
-$loop:	ble	t2, $limit
-	EX( ldq t0, 8(v0) )
-	subq	t2, 8, t2
-	addq    v0, 8, v0	# addr += 8
-	cmpbge  zero, t0, t1
-	beq     t1, $loop
-
-$found:	negq    t1, t2		# clear all but least set bit
-	and     t1, t2, t1
-
-	and     t1, 0xf0, t2	# binary search for that set bit
-	and	t1, 0xcc, t3
-	and	t1, 0xaa, t4
-	cmovne	t2, 4, t2
-	cmovne	t3, 2, t3
-	cmovne	t4, 1, t4
-	addq	t2, t3, t2
-	addq	v0, t4, v0
-	addq	v0, t2, v0
-	nop			# dual issue next two on ev4 and ev5
-	subq    v0, a0, v0
-$exception:
-	ret
-
-	.align 3		# currently redundant
-$limit:
-	subq	a1, t2, v0
-	ret
-
-	.end __strlen_user
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
deleted file mode 100644
index 73ee211..0000000
--- a/arch/alpha/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * arch/alpha/lib/strncpy_from_user.S
- * Contributed by Richard Henderson (rth@tamu.edu)
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT       if an exception occurs before the terminator is copied.
- * N             if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one.  */
-#define EX(x,y...)			\
-	99: x,##y;			\
-	.section __ex_table,"a";	\
-	.long 99b - .;			\
-	lda $31, $exception-99b($0); 	\
-	.previous
-
-
-	.set noat
-	.set noreorder
-	.text
-
-	.globl __strncpy_from_user
-	.ent __strncpy_from_user
-	.frame $30, 0, $26
-	.prologue 0
-
-	.align 3
-$aligned:
-	/* On entry to this basic block:
-	   t0 == the first destination word for masking back in
-	   t1 == the first source word.  */
-
-	/* Create the 1st output word and detect 0's in the 1st input word.  */
-	lda	t2, -1		# e1    : build a mask against false zero
-	mskqh	t2, a1, t2	# e0    :   detection in the src word
-	mskqh	t1, a1, t3	# e0    :
-	ornot	t1, t2, t2	# .. e1 :
-	mskql	t0, a1, t0	# e0    : assemble the first output word
-	cmpbge	zero, t2, t8	# .. e1 : bits set iff null found
-	or	t0, t3, t0	# e0    :
-	beq	a2, $a_eoc	# .. e1 :
-	bne	t8, $a_eos	# .. e1 :
-
-	/* On entry to this basic block:
-	   t0 == a source word not containing a null.  */
-
-$a_loop:
-	stq_u	t0, 0(a0)	# e0    :
-	addq	a0, 8, a0	# .. e1 :
-	EX( ldq_u t0, 0(a1) )	# e0    :
-	addq	a1, 8, a1	# .. e1 :
-	subq	a2, 1, a2	# e0    :
-	cmpbge	zero, t0, t8	# .. e1 (stall)
-	beq	a2, $a_eoc      # e1    :
-	beq	t8, $a_loop	# e1    :
-
-	/* Take care of the final (partial) word store.  At this point
-	   the end-of-count bit is set in t8 iff it applies.
-
-	   On entry to this basic block we have:
-	   t0 == the source word containing the null
-	   t8 == the cmpbge mask that found it.  */
-
-$a_eos:
-	negq	t8, t12		# e0    : find low bit set
-	and	t8, t12, t12	# e1 (stall)
-
-	/* For the sake of the cache, don't read a destination word
-	   if we're not going to need it.  */
-	and	t12, 0x80, t6	# e0    :
-	bne	t6, 1f		# .. e1 (zdb)
-
-	/* We're doing a partial word store and so need to combine
-	   our source and original destination words.  */
-	ldq_u	t1, 0(a0)	# e0    :
-	subq	t12, 1, t6	# .. e1 :
-	or	t12, t6, t8	# e0    :
-	unop			#
-	zapnot	t0, t8, t0	# e0    : clear src bytes > null
-	zap	t1, t8, t1	# .. e1 : clear dst bytes <= null
-	or	t0, t1, t0	# e1    :
-
-1:	stq_u	t0, 0(a0)
-	br	$finish_up
-
-	/* Add the end-of-count bit to the eos detection bitmask.  */
-$a_eoc:
-	or	t10, t8, t8
-	br	$a_eos
-
-	/*** The Function Entry Point ***/
-	.align 3
-__strncpy_from_user:
-	mov	a0, v0		# save the string start
-	beq	a2, $zerolength
-
-	/* Are source and destination co-aligned?  */
-	xor	a0, a1, t1	# e0    :
-	and	a0, 7, t0	# .. e1 : find dest misalignment
-	and	t1, 7, t1	# e0    :
-	addq	a2, t0, a2	# .. e1 : bias count by dest misalignment
-	subq	a2, 1, a2	# e0    :
-	and	a2, 7, t2	# e1    :
-	srl	a2, 3, a2	# e0    : a2 = loop counter = (count - 1)/8
-	addq	zero, 1, t10	# .. e1 :
-	sll	t10, t2, t10	# e0    : t10 = bitmask of last count byte
-	bne	t1, $unaligned	# .. e1 :
-
-	/* We are co-aligned; take care of a partial first word.  */
-
-	EX( ldq_u t1, 0(a1) )	# e0    : load first src word
-	addq	a1, 8, a1	# .. e1 :
-
-	beq	t0, $aligned	# avoid loading dest word if not needed
-	ldq_u	t0, 0(a0)	# e0    :
-	br	$aligned	# .. e1 :
-
-
-/* The source and destination are not co-aligned.  Align the destination
-   and cope.  We have to be very careful about not reading too much and
-   causing a SEGV.  */
-
-	.align 3
-$u_head:
-	/* We know just enough now to be able to assemble the first
-	   full source word.  We can still find a zero at the end of it
-	   that prevents us from outputting the whole thing.
-
-	   On entry to this basic block:
-	   t0 == the first dest word, unmasked
-	   t1 == the shifted low bits of the first source word
-	   t6 == bytemask that is -1 in dest word bytes */
-
-	EX( ldq_u t2, 8(a1) )	# e0    : load second src word
-	addq	a1, 8, a1	# .. e1 :
-	mskql	t0, a0, t0	# e0    : mask trailing garbage in dst
-	extqh	t2, a1, t4	# e0    :
-	or	t1, t4, t1	# e1    : first aligned src word complete
-	mskqh	t1, a0, t1	# e0    : mask leading garbage in src
-	or	t0, t1, t0	# e0    : first output word complete
-	or	t0, t6, t6	# e1    : mask original data for zero test
-	cmpbge	zero, t6, t8	# e0    :
-	beq	a2, $u_eocfin	# .. e1 :
-	bne	t8, $u_final	# e1    :
-
-	lda	t6, -1			# e1    : mask out the bits we have
-	mskql	t6, a1, t6		# e0    :   already seen
-	stq_u	t0, 0(a0)		# e0    : store first output word
-	or      t6, t2, t2		# .. e1 :
-	cmpbge	zero, t2, t8		# e0    : find nulls in second partial
-	addq	a0, 8, a0		# .. e1 :
-	subq	a2, 1, a2		# e0    :
-	bne	t8, $u_late_head_exit	# .. e1 :
-
-	/* Finally, we've got all the stupid leading edge cases taken care
-	   of and we can set up to enter the main loop.  */
-
-	extql	t2, a1, t1	# e0    : position hi-bits of lo word
-	EX( ldq_u t2, 8(a1) )	# .. e1 : read next high-order source word
-	addq	a1, 8, a1	# e0    :
-	cmpbge	zero, t2, t8	# e1 (stall)
-	beq	a2, $u_eoc	# e1    :
-	bne	t8, $u_eos	# e1    :
-
-	/* Unaligned copy main loop.  In order to avoid reading too much,
-	   the loop is structured to detect zeros in aligned source words.
-	   This has, unfortunately, effectively pulled half of a loop
-	   iteration out into the head and half into the tail, but it does
-	   prevent nastiness from accumulating in the very thing we want
-	   to run as fast as possible.
-
-	   On entry to this basic block:
-	   t1 == the shifted high-order bits from the previous source word
-	   t2 == the unshifted current source word
-
-	   We further know that t2 does not contain a null terminator.  */
-
-	.align 3
-$u_loop:
-	extqh	t2, a1, t0	# e0    : extract high bits for current word
-	addq	a1, 8, a1	# .. e1 :
-	extql	t2, a1, t3	# e0    : extract low bits for next time
-	addq	a0, 8, a0	# .. e1 :
-	or	t0, t1, t0	# e0    : current dst word now complete
-	EX( ldq_u t2, 0(a1) )	# .. e1 : load high word for next time
-	stq_u	t0, -8(a0)	# e0    : save the current word
-	mov	t3, t1		# .. e1 :
-	subq	a2, 1, a2	# e0    :
-	cmpbge	zero, t2, t8	# .. e1 : test new word for eos
-	beq	a2, $u_eoc	# e1    :
-	beq	t8, $u_loop	# e1    :
-
-	/* We've found a zero somewhere in the source word we just read.
-	   If it resides in the lower half, we have one (probably partial)
-	   word to write out, and if it resides in the upper half, we
-	   have one full and one partial word left to write out.
-
-	   On entry to this basic block:
-	   t1 == the shifted high-order bits from the previous source word
-	   t2 == the unshifted current source word.  */
-$u_eos:
-	extqh	t2, a1, t0	# e0    :
-	or	t0, t1, t0	# e1    : first (partial) source word complete
-
-	cmpbge	zero, t0, t8	# e0    : is the null in this first bit?
-	bne	t8, $u_final	# .. e1 (zdb)
-
-	stq_u	t0, 0(a0)	# e0    : the null was in the high-order bits
-	addq	a0, 8, a0	# .. e1 :
-	subq	a2, 1, a2	# e1    :
-
-$u_late_head_exit:
-	extql	t2, a1, t0	# .. e0 :
-	cmpbge	zero, t0, t8	# e0    :
-	or	t8, t10, t6	# e1    :
-	cmoveq	a2, t6, t8	# e0    :
-	nop			# .. e1 :
-
-	/* Take care of a final (probably partial) result word.
-	   On entry to this basic block:
-	   t0 == assembled source word
-	   t8 == cmpbge mask that found the null.  */
-$u_final:
-	negq	t8, t6		# e0    : isolate low bit set
-	and	t6, t8, t12	# e1    :
-
-	and	t12, 0x80, t6	# e0    : avoid dest word load if we can
-	bne	t6, 1f		# .. e1 (zdb)
-
-	ldq_u	t1, 0(a0)	# e0    :
-	subq	t12, 1, t6	# .. e1 :
-	or	t6, t12, t8	# e0    :
-	zapnot	t0, t8, t0	# .. e1 : kill source bytes > null
-	zap	t1, t8, t1	# e0    : kill dest bytes <= null
-	or	t0, t1, t0	# e1    :
-
-1:	stq_u	t0, 0(a0)	# e0    :
-	br	$finish_up
-
-$u_eoc:				# end-of-count
-	extqh	t2, a1, t0
-	or	t0, t1, t0
-	cmpbge	zero, t0, t8
-
-$u_eocfin:			# end-of-count, final word
-	or	t10, t8, t8
-	br	$u_final
-
-	/* Unaligned copy entry point.  */
-	.align 3
-$unaligned:
-
-	EX( ldq_u t1, 0(a1) )	# e0    : load first source word
-
-	and	a0, 7, t4	# .. e1 : find dest misalignment
-	and	a1, 7, t5	# e0    : find src misalignment
-
-	/* Conditionally load the first destination word and a bytemask
-	   with 0xff indicating that the destination byte is sacrosanct.  */
-
-	mov	zero, t0	# .. e1 :
-	mov	zero, t6	# e0    :
-	beq	t4, 1f		# .. e1 :
-	ldq_u	t0, 0(a0)	# e0    :
-	lda	t6, -1		# .. e1 :
-	mskql	t6, a0, t6	# e0    :
-1:
-	subq	a1, t4, a1	# .. e1 : sub dest misalignment from src addr
-
-	/* If source misalignment is larger than dest misalignment, we need
-	   extra startup checks to avoid SEGV.  */
-
-	cmplt	t4, t5, t12	# e1    :
-	extql	t1, a1, t1	# .. e0 : shift src into place
-	lda	t2, -1		# e0    : for creating masks later
-	beq	t12, $u_head	# e1    :
-
-	mskqh	t2, t5, t2	# e0    : begin src byte validity mask
-	cmpbge	zero, t1, t8	# .. e1 : is there a zero?
-	extql	t2, a1, t2	# e0    :
-	or	t8, t10, t5	# .. e1 : test for end-of-count too
-	cmpbge	zero, t2, t3	# e0    :
-	cmoveq	a2, t5, t8	# .. e1 :
-	andnot	t8, t3, t8	# e0    :
-	beq	t8, $u_head	# .. e1 (zdb)
-
-	/* At this point we've found a zero in the first partial word of
-	   the source.  We need to isolate the valid source data and mask
-	   it into the original destination data.  (Incidentally, we know
-	   that we'll need at least one byte of that original dest word.) */
-
-	ldq_u	t0, 0(a0)	# e0    :
-	negq	t8, t6		# .. e1 : build bitmask of bytes <= zero
-	mskqh	t1, t4, t1	# e0    :
-	and	t6, t8, t12	# .. e1 :
-	subq	t12, 1, t6	# e0    :
-	or	t6, t12, t8	# e1    :
-
-	zapnot	t2, t8, t2	# e0    : prepare source word; mirror changes
-	zapnot	t1, t8, t1	# .. e1 : to source validity mask
-
-	andnot	t0, t2, t0	# e0    : zero place for source to reside
-	or	t0, t1, t0	# e1    : and put it there
-	stq_u	t0, 0(a0)	# e0    :
-
-$finish_up:
-	zapnot	t0, t12, t4	# was last byte written null?
-	cmovne	t4, 1, t4
-
-	and	t12, 0xf0, t3	# binary search for the address of the
-	and	t12, 0xcc, t2	# last byte written
-	and	t12, 0xaa, t1
-	bic	a0, 7, t0
-	cmovne	t3, 4, t3
-	cmovne	t2, 2, t2
-	cmovne	t1, 1, t1
-	addq	t0, t3, t0
-	addq	t1, t2, t1
-	addq	t0, t1, t0
-	addq	t0, t4, t0	# add one if we filled the buffer
-
-	subq	t0, v0, v0	# find string length
-	ret
-
-$zerolength:
-	clr	v0
-$exception:
-	ret
-
-	.end __strncpy_from_user
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1..0c4132d 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,6 +89,8 @@
 	const struct exception_table_entry *fixup;
 	int fault, si_code = SEGV_MAPERR;
 	siginfo_t info;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (cause > 0 ? FAULT_FLAG_WRITE : 0));
 
 	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
 	   (or is suppressed by the PALcode).  Support that for older CPUs
@@ -114,6 +116,7 @@
 		goto vmalloc_fault;
 #endif
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -144,8 +147,11 @@
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
-	up_read(&mm->mmap_sem);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -153,10 +159,26 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
+
 	return;
 
 	/* Something tried to access memory that isn't in our memory map.
@@ -186,12 +208,14 @@
 	/* We ran out of memory, or some other thing happened to us that
 	   made us unable to handle the page fault gracefully.  */
  out_of_memory:
+	up_read(&mm->mmap_sem);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
  do_sigbus:
+	up_read(&mm->mmap_sem);
 	/* Send a sigbus, regardless of whether we were in kernel
 	   or user mode.  */
 	info.si_signo = SIGBUS;
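For reference, the retry flow that the fault.c hunks above add reduces to the following pattern (a condensed sketch rather than the literal alpha code; the vma lookup, error paths and fault accounting are elided):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (cause > 0 ? FAULT_FLAG_WRITE : 0);
retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* On VM_FAULT_RETRY the core MM has already dropped mmap_sem for us */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry at most once */
		goto retry;	/* mmap_sem was released in __lock_page_or_retry */
	}

	up_read(&mm->mmap_sem);
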
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index a0a5d27..b8ce18f 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,6 +12,7 @@
 #include <linux/smp.h>
 #include <linux/errno.h>
 #include <asm/ptrace.h>
+#include <asm/special_insns.h>
 
 #include "op_impl.h"
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e91c7cd..6d6e18f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -38,7 +38,6 @@
 	select HARDIRQS_SW_RESEND
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
-	select GENERIC_IRQ_PROBE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HARDIRQS_SW_RESEND
 	select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -126,11 +125,6 @@
 	bool
 	default y
 
-config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
-
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index a874dbf..e613831 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -51,11 +51,11 @@
 
 			dma-apbh@80004000 {
 				compatible = "fsl,imx23-dma-apbh";
-				reg = <0x80004000 2000>;
+				reg = <0x80004000 0x2000>;
 			};
 
 			ecc@80008000 {
-				reg = <0x80008000 2000>;
+				reg = <0x80008000 0x2000>;
 				status = "disabled";
 			};
 
@@ -63,7 +63,7 @@
 				compatible = "fsl,imx23-gpmi-nand";
 				#address-cells = <1>;
 				#size-cells = <1>;
-				reg = <0x8000c000 2000>, <0x8000a000 2000>;
+				reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
 				reg-names = "gpmi-nand", "bch";
 				interrupts = <13>, <56>;
 				interrupt-names = "gpmi-dma", "bch";
@@ -72,14 +72,14 @@
 			};
 
 			ssp0: ssp@80010000 {
-				reg = <0x80010000 2000>;
+				reg = <0x80010000 0x2000>;
 				interrupts = <15 14>;
 				fsl,ssp-dma-channel = <1>;
 				status = "disabled";
 			};
 
 			etm@80014000 {
-				reg = <0x80014000 2000>;
+				reg = <0x80014000 0x2000>;
 				status = "disabled";
 			};
 
@@ -87,7 +87,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "fsl,imx23-pinctrl", "simple-bus";
-				reg = <0x80018000 2000>;
+				reg = <0x80018000 0x2000>;
 
 				gpio0: gpio@0 {
 					compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
@@ -273,32 +273,32 @@
 			};
 
 			emi@80020000 {
-				reg = <0x80020000 2000>;
+				reg = <0x80020000 0x2000>;
 				status = "disabled";
 			};
 
 			dma-apbx@80024000 {
 				compatible = "fsl,imx23-dma-apbx";
-				reg = <0x80024000 2000>;
+				reg = <0x80024000 0x2000>;
 			};
 
 			dcp@80028000 {
-				reg = <0x80028000 2000>;
+				reg = <0x80028000 0x2000>;
 				status = "disabled";
 			};
 
 			pxp@8002a000 {
-				reg = <0x8002a000 2000>;
+				reg = <0x8002a000 0x2000>;
 				status = "disabled";
 			};
 
 			ocotp@8002c000 {
-				reg = <0x8002c000 2000>;
+				reg = <0x8002c000 0x2000>;
 				status = "disabled";
 			};
 
 			axi-ahb@8002e000 {
-				reg = <0x8002e000 2000>;
+				reg = <0x8002e000 0x2000>;
 				status = "disabled";
 			};
 
@@ -310,14 +310,14 @@
 			};
 
 			ssp1: ssp@80034000 {
-				reg = <0x80034000 2000>;
+				reg = <0x80034000 0x2000>;
 				interrupts = <2 20>;
 				fsl,ssp-dma-channel = <2>;
 				status = "disabled";
 			};
 
 			tvenc@80038000 {
-				reg = <0x80038000 2000>;
+				reg = <0x80038000 0x2000>;
 				status = "disabled";
 			};
                 };
@@ -330,37 +330,37 @@
 			ranges;
 
 			clkctl@80040000 {
-				reg = <0x80040000 2000>;
+				reg = <0x80040000 0x2000>;
 				status = "disabled";
 			};
 
 			saif0: saif@80042000 {
-				reg = <0x80042000 2000>;
+				reg = <0x80042000 0x2000>;
 				status = "disabled";
 			};
 
 			power@80044000 {
-				reg = <0x80044000 2000>;
+				reg = <0x80044000 0x2000>;
 				status = "disabled";
 			};
 
 			saif1: saif@80046000 {
-				reg = <0x80046000 2000>;
+				reg = <0x80046000 0x2000>;
 				status = "disabled";
 			};
 
 			audio-out@80048000 {
-				reg = <0x80048000 2000>;
+				reg = <0x80048000 0x2000>;
 				status = "disabled";
 			};
 
 			audio-in@8004c000 {
-				reg = <0x8004c000 2000>;
+				reg = <0x8004c000 0x2000>;
 				status = "disabled";
 			};
 
 			lradc@80050000 {
-				reg = <0x80050000 2000>;
+				reg = <0x80050000 0x2000>;
 				status = "disabled";
 			};
 
@@ -370,26 +370,26 @@
 			};
 
 			i2c@80058000 {
-				reg = <0x80058000 2000>;
+				reg = <0x80058000 0x2000>;
 				status = "disabled";
 			};
 
 			rtc@8005c000 {
 				compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc";
-				reg = <0x8005c000 2000>;
+				reg = <0x8005c000 0x2000>;
 				interrupts = <22>;
 			};
 
 			pwm: pwm@80064000 {
 				compatible = "fsl,imx23-pwm";
-				reg = <0x80064000 2000>;
+				reg = <0x80064000 0x2000>;
 				#pwm-cells = <2>;
 				fsl,pwm-number = <5>;
 				status = "disabled";
 			};
 
 			timrot@80068000 {
-				reg = <0x80068000 2000>;
+				reg = <0x80068000 0x2000>;
 				status = "disabled";
 			};
 
@@ -429,7 +429,7 @@
 		ranges;
 
 		usbctrl@80080000 {
-			reg = <0x80080000 0x10000>;
+			reg = <0x80080000 0x40000>;
 			status = "disabled";
 		};
 	};
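The reg changes above are functional fixes, not cleanups: device tree integers without a 0x prefix are decimal, so a size of 2000 described a 2000-byte (0x7d0) window instead of the intended 8 KiB (0x2000) one. The usbctrl window is likewise corrected from 64 KiB (0x10000) to 256 KiB (0x40000).
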
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
index d3f8296..0a8978a 100644
--- a/arch/arm/boot/dts/imx27-3ds.dts
+++ b/arch/arm/boot/dts/imx27-3ds.dts
@@ -27,7 +27,7 @@
 				status = "okay";
 			};
 
-			uart@1000a000 {
+			uart1: serial@1000a000 {
 				fsl,uart-has-rtscts;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 00bae3a..5303ab6 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -19,6 +19,12 @@
 		serial3 = &uart4;
 		serial4 = &uart5;
 		serial5 = &uart6;
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
+		gpio4 = &gpio5;
+		gpio5 = &gpio6;
 	};
 
 	avic: avic-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 787efac..3fa6d19 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -57,18 +57,18 @@
 			};
 
 			hsadc@80002000 {
-				reg = <0x80002000 2000>;
+				reg = <0x80002000 0x2000>;
 				interrupts = <13 87>;
 				status = "disabled";
 			};
 
 			dma-apbh@80004000 {
 				compatible = "fsl,imx28-dma-apbh";
-				reg = <0x80004000 2000>;
+				reg = <0x80004000 0x2000>;
 			};
 
 			perfmon@80006000 {
-				reg = <0x80006000 800>;
+				reg = <0x80006000 0x800>;
 				interrupts = <27>;
 				status = "disabled";
 			};
@@ -77,7 +77,7 @@
 				compatible = "fsl,imx28-gpmi-nand";
 				#address-cells = <1>;
 				#size-cells = <1>;
-				reg = <0x8000c000 2000>, <0x8000a000 2000>;
+				reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
 				reg-names = "gpmi-nand", "bch";
 				interrupts = <88>, <41>;
 				interrupt-names = "gpmi-dma", "bch";
@@ -86,28 +86,28 @@
 			};
 
 			ssp0: ssp@80010000 {
-				reg = <0x80010000 2000>;
+				reg = <0x80010000 0x2000>;
 				interrupts = <96 82>;
 				fsl,ssp-dma-channel = <0>;
 				status = "disabled";
 			};
 
 			ssp1: ssp@80012000 {
-				reg = <0x80012000 2000>;
+				reg = <0x80012000 0x2000>;
 				interrupts = <97 83>;
 				fsl,ssp-dma-channel = <1>;
 				status = "disabled";
 			};
 
 			ssp2: ssp@80014000 {
-				reg = <0x80014000 2000>;
+				reg = <0x80014000 0x2000>;
 				interrupts = <98 84>;
 				fsl,ssp-dma-channel = <2>;
 				status = "disabled";
 			};
 
 			ssp3: ssp@80016000 {
-				reg = <0x80016000 2000>;
+				reg = <0x80016000 0x2000>;
 				interrupts = <99 85>;
 				fsl,ssp-dma-channel = <3>;
 				status = "disabled";
@@ -117,7 +117,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "fsl,imx28-pinctrl", "simple-bus";
-				reg = <0x80018000 2000>;
+				reg = <0x80018000 0x2000>;
 
 				gpio0: gpio@0 {
 					compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
@@ -510,96 +510,96 @@
 			};
 
 			digctl@8001c000 {
-				reg = <0x8001c000 2000>;
+				reg = <0x8001c000 0x2000>;
 				interrupts = <89>;
 				status = "disabled";
 			};
 
 			etm@80022000 {
-				reg = <0x80022000 2000>;
+				reg = <0x80022000 0x2000>;
 				status = "disabled";
 			};
 
 			dma-apbx@80024000 {
 				compatible = "fsl,imx28-dma-apbx";
-				reg = <0x80024000 2000>;
+				reg = <0x80024000 0x2000>;
 			};
 
 			dcp@80028000 {
-				reg = <0x80028000 2000>;
+				reg = <0x80028000 0x2000>;
 				interrupts = <52 53 54>;
 				status = "disabled";
 			};
 
 			pxp@8002a000 {
-				reg = <0x8002a000 2000>;
+				reg = <0x8002a000 0x2000>;
 				interrupts = <39>;
 				status = "disabled";
 			};
 
 			ocotp@8002c000 {
-				reg = <0x8002c000 2000>;
+				reg = <0x8002c000 0x2000>;
 				status = "disabled";
 			};
 
 			axi-ahb@8002e000 {
-				reg = <0x8002e000 2000>;
+				reg = <0x8002e000 0x2000>;
 				status = "disabled";
 			};
 
 			lcdif@80030000 {
 				compatible = "fsl,imx28-lcdif";
-				reg = <0x80030000 2000>;
+				reg = <0x80030000 0x2000>;
 				interrupts = <38 86>;
 				status = "disabled";
 			};
 
 			can0: can@80032000 {
 				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
-				reg = <0x80032000 2000>;
+				reg = <0x80032000 0x2000>;
 				interrupts = <8>;
 				status = "disabled";
 			};
 
 			can1: can@80034000 {
 				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
-				reg = <0x80034000 2000>;
+				reg = <0x80034000 0x2000>;
 				interrupts = <9>;
 				status = "disabled";
 			};
 
 			simdbg@8003c000 {
-				reg = <0x8003c000 200>;
+				reg = <0x8003c000 0x200>;
 				status = "disabled";
 			};
 
 			simgpmisel@8003c200 {
-				reg = <0x8003c200 100>;
+				reg = <0x8003c200 0x100>;
 				status = "disabled";
 			};
 
 			simsspsel@8003c300 {
-				reg = <0x8003c300 100>;
+				reg = <0x8003c300 0x100>;
 				status = "disabled";
 			};
 
 			simmemsel@8003c400 {
-				reg = <0x8003c400 100>;
+				reg = <0x8003c400 0x100>;
 				status = "disabled";
 			};
 
 			gpiomon@8003c500 {
-				reg = <0x8003c500 100>;
+				reg = <0x8003c500 0x100>;
 				status = "disabled";
 			};
 
 			simenet@8003c700 {
-				reg = <0x8003c700 100>;
+				reg = <0x8003c700 0x100>;
 				status = "disabled";
 			};
 
 			armjtag@8003c800 {
-				reg = <0x8003c800 100>;
+				reg = <0x8003c800 0x100>;
 				status = "disabled";
 			};
                 };
@@ -612,45 +612,45 @@
 			ranges;
 
 			clkctl@80040000 {
-				reg = <0x80040000 2000>;
+				reg = <0x80040000 0x2000>;
 				status = "disabled";
 			};
 
 			saif0: saif@80042000 {
 				compatible = "fsl,imx28-saif";
-				reg = <0x80042000 2000>;
+				reg = <0x80042000 0x2000>;
 				interrupts = <59 80>;
 				fsl,saif-dma-channel = <4>;
 				status = "disabled";
 			};
 
 			power@80044000 {
-				reg = <0x80044000 2000>;
+				reg = <0x80044000 0x2000>;
 				status = "disabled";
 			};
 
 			saif1: saif@80046000 {
 				compatible = "fsl,imx28-saif";
-				reg = <0x80046000 2000>;
+				reg = <0x80046000 0x2000>;
 				interrupts = <58 81>;
 				fsl,saif-dma-channel = <5>;
 				status = "disabled";
 			};
 
 			lradc@80050000 {
-				reg = <0x80050000 2000>;
+				reg = <0x80050000 0x2000>;
 				status = "disabled";
 			};
 
 			spdif@80054000 {
-				reg = <0x80054000 2000>;
+				reg = <0x80054000 0x2000>;
 				interrupts = <45 66>;
 				status = "disabled";
 			};
 
 			rtc@80056000 {
 				compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc";
-				reg = <0x80056000 2000>;
+				reg = <0x80056000 0x2000>;
 				interrupts = <29>;
 			};
 
@@ -658,7 +658,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "fsl,imx28-i2c";
-				reg = <0x80058000 2000>;
+				reg = <0x80058000 0x2000>;
 				interrupts = <111 68>;
 				clock-frequency = <100000>;
 				status = "disabled";
@@ -668,7 +668,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "fsl,imx28-i2c";
-				reg = <0x8005a000 2000>;
+				reg = <0x8005a000 0x2000>;
 				interrupts = <110 69>;
 				clock-frequency = <100000>;
 				status = "disabled";
@@ -676,14 +676,14 @@
 
 			pwm: pwm@80064000 {
 				compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
-				reg = <0x80064000 2000>;
+				reg = <0x80064000 0x2000>;
 				#pwm-cells = <2>;
 				fsl,pwm-number = <8>;
 				status = "disabled";
 			};
 
 			timrot@80068000 {
-				reg = <0x80068000 2000>;
+				reg = <0x80068000 0x2000>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index de065b5..cd86177 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -53,7 +53,7 @@
 						spi-max-frequency = <6000000>;
 						reg = <0>;
 						interrupt-parent = <&gpio1>;
-						interrupts = <8>;
+						interrupts = <8 0x4>;
 
 						regulators {
 							sw1_reg: sw1 {
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 53cbaa3..aba28dc 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -17,6 +17,10 @@
 		serial0 = &uart1;
 		serial1 = &uart2;
 		serial2 = &uart3;
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
 	};
 
 	tzic: tz-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 5b8eafc..da895e9 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -64,12 +64,32 @@
 			reg = <0xf4000000 0x2000000>;
 			phy-mode = "mii";
 			interrupt-parent = <&gpio2>;
-			interrupts = <31>;
+			interrupts = <31 0x8>;
 			reg-io-width = <4>;
+			/*
+			 * VDD33A and VDDVARIO of LAN9220 are supplied by
+			 * SW4_3V3 of LTC3589.  Until the regulator driver
+			 * for this PMIC is available, we use a fixed dummy
+			 * 3V3 regulator to get LAN9220 driver probing to work.
+			 */
+			vdd33a-supply = <&reg_3p3v>;
+			vddvario-supply = <&reg_3p3v>;
 			smsc,irq-push-pull;
 		};
 	};
 
+	regulators {
+		compatible = "simple-bus";
+
+		reg_3p3v: 3p3v {
+			compatible = "regulator-fixed";
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index fc79cdc..cd37165 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -19,6 +19,13 @@
 		serial2 = &uart3;
 		serial3 = &uart4;
 		serial4 = &uart5;
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
+		gpio4 = &gpio5;
+		gpio5 = &gpio6;
+		gpio6 = &gpio7;
 	};
 
 	tzic: tz-interrupt-controller@0fffc000 {
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index d42e851..72f30f3 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -53,6 +53,7 @@
 						fsl,pins = <
 							   144  0x80000000	/* MX6Q_PAD_EIM_D22__GPIO_3_22 */
 							   121  0x80000000	/* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+							   953  0x80000000	/* MX6Q_PAD_GPIO_0__CCM_CLKO */
 							   >;
 					};
 				};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 3d3c64b..fd57079 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -19,6 +19,13 @@
 		serial2 = &uart3;
 		serial3 = &uart4;
 		serial4 = &uart5;
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
+		gpio4 = &gpio5;
+		gpio5 = &gpio6;
+		gpio6 = &gpio7;
 	};
 
 	cpus {
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f725b96..3c9f32f 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -192,6 +192,7 @@
 CONFIG_RTC_DRV_MXC=y
 CONFIG_DMADEVICES=y
 CONFIG_IMX_SDMA=y
+CONFIG_MXS_DMA=y
 CONFIG_COMMON_CLK_DEBUG=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index ccdb635..4edcfb4 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -34,7 +34,6 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_AUTO_ZRELADDR=y
 CONFIG_FPE_NWFPE=y
 CONFIG_NET=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 1d24f845..71277a1 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_BUG is not set
+# CONFIG_BUGVERBOSE is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_SHMEM is not set
 CONFIG_SLOB=y
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d..41dc31f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,25 +195,6 @@
 
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
-#if __LINUX_ARM_ARCH__ < 6
-static inline void __sync_icache_dcache(pte_t pteval)
-{
-}
-#else
-extern void __sync_icache_dcache(pte_t pteval);
-#endif
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
-{
-	if (addr >= TASK_SIZE)
-		set_pte_ext(ptep, pteval, 0);
-	else {
-		__sync_icache_dcache(pteval);
-		set_pte_ext(ptep, pteval, PTE_EXT_NG);
-	}
-}
-
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
@@ -226,6 +207,27 @@
 	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
 	 (L_PTE_PRESENT | L_PTE_USER))
 
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	unsigned long ext = 0;
+
+	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+		__sync_icache_dcache(pteval);
+		ext |= PTE_EXT_NG;
+	}
+
+	set_pte_ext(ptep, pteval, ext);
+}
+
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
@@ -251,13 +253,13 @@
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <- type --> 0 0 0
+ *   <--------------- offset ----------------------> < type -> 0 0 0
  *
- * This gives us up to 63 swap files and 32GB per swap file.  Note that
+ * This gives us up to 31 swap files and 64GB per swap file.  Note that
  * the offset field is always non-zero.
  */
 #define __SWP_TYPE_SHIFT	3
-#define __SWP_TYPE_BITS		6
+#define __SWP_TYPE_BITS		5
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
 
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f7572..05b8e82 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate);
 
 #endif
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186a..f451539 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@
 	u32 epoch_cyc_copy;
 	u32 mult;
 	u32 shift;
+	bool suspended;
+	bool needs_suspend;
 };
 
 static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@
 	u64 epoch_ns;
 	u32 epoch_cyc;
 
+	if (cd.suspended)
+		return cd.epoch_ns;
+
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
 	 * ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@
 	update_sched_clock();
 }
 
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -169,11 +181,23 @@
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
 	return 0;
 }
 
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
 static struct syscore_ops sched_clock_ops = {
 	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
 };
 
 static int __init sched_clock_syscore_init(void)
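A usage sketch for the new setup_sched_clock_needs_suspend() hook; the timer name, register offset and rate below are hypothetical, not taken from any driver in this merge. A platform whose sched_clock counter stops or loses state across suspend registers through the wrapper so that sched_clock() returns the last cached epoch_ns while suspended and is re-based from a fresh counter read on resume:

	/* Hypothetical platform timer code, for illustration only. */
	static void __iomem *my_timer_base;

	static u32 notrace my_read_sched_clock(void)
	{
		/* free-running count register (assumed offset 0x10) */
		return readl_relaxed(my_timer_base + 0x10);
	}

	static void __init my_timer_init(void)
	{
		/* 32-bit counter at 24 MHz that does not tick during suspend */
		setup_sched_clock_needs_suspend(my_read_sched_clock, 32, 24000000);
	}
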
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 198b0845..26c12c6 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -321,7 +321,7 @@
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
 {
 	unsigned int cpu;
 
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 2473fd1..af72969 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -16,13 +16,30 @@
 		   call_with_stack.o
 
 mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
-mmu-y	+= copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+  mmu-y	+= copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+  mmu-y	+= copy_from_user.o copy_to_user.o
+else
+  mmu-y	+= uaccess.o
+endif
+endif
 
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
 
-lib-$(CONFIG_MMU)		+= $(mmu-y)
-lib-y				+= io-readsw-armv4.o io-writesw-armv4.o
+lib-$(CONFIG_MMU) += $(mmu-y)
+
+ifeq ($(CONFIG_CPU_32v3),y)
+  lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
+else
+  lib-y	+= io-readsw-armv4.o io-writesw-armv4.o
+endif
+
 lib-$(CONFIG_ARCH_RPC)		+= ecard.o io-acorn.o floppydma.o
 lib-$(CONFIG_ARCH_SHARK)	+= io-shark.o
 
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
new file mode 100644
index 0000000..88487c8
--- /dev/null
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -0,0 +1,106 @@
+/*
+ *  linux/arch/arm/lib/io-readsw-armv3.S
+ *
+ *  Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Linsw_bad_alignment:
+		adr	r0, .Linsw_bad_align_msg
+		mov	r2, lr
+		b	panic
+.Linsw_bad_align_msg:
+		.asciz	"insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+		.align
+
+.Linsw_align:	tst	r1, #1
+		bne	.Linsw_bad_alignment
+
+		ldr	r3, [r0]
+		strb	r3, [r1], #1
+		mov	r3, r3, lsr #8
+		strb	r3, [r1], #1
+
+		subs	r2, r2, #1
+		moveq	pc, lr
+
+ENTRY(__raw_readsw)
+		teq	r2, #0		@ do we have to check for the zero len?
+		moveq	pc, lr
+		tst	r1, #3
+		bne	.Linsw_align
+
+.Linsw_aligned:	mov	ip, #0xff
+		orr	ip, ip, ip, lsl #8
+		stmfd	sp!, {r4, r5, r6, lr}
+
+		subs	r2, r2, #8
+		bmi	.Lno_insw_8
+
+.Linsw_8_lp:	ldr	r3, [r0]
+		and	r3, r3, ip
+		ldr	r4, [r0]
+		orr	r3, r3, r4, lsl #16
+
+		ldr	r4, [r0]
+		and	r4, r4, ip
+		ldr	r5, [r0]
+		orr	r4, r4, r5, lsl #16
+
+		ldr	r5, [r0]
+		and	r5, r5, ip
+		ldr	r6, [r0]
+		orr	r5, r5, r6, lsl #16
+
+		ldr	r6, [r0]
+		and	r6, r6, ip
+		ldr	lr, [r0]
+		orr	r6, r6, lr, lsl #16
+
+		stmia	r1!, {r3 - r6}
+
+		subs	r2, r2, #8
+		bpl	.Linsw_8_lp
+
+		tst	r2, #7
+		ldmeqfd	sp!, {r4, r5, r6, pc}
+
+.Lno_insw_8:	tst	r2, #4
+		beq	.Lno_insw_4
+
+		ldr	r3, [r0]
+		and	r3, r3, ip
+		ldr	r4, [r0]
+		orr	r3, r3, r4, lsl #16
+
+		ldr	r4, [r0]
+		and	r4, r4, ip
+		ldr	r5, [r0]
+		orr	r4, r4, r5, lsl #16
+
+		stmia	r1!, {r3, r4}
+
+.Lno_insw_4:	tst	r2, #2
+		beq	.Lno_insw_2
+
+		ldr	r3, [r0]
+		and	r3, r3, ip
+		ldr	r4, [r0]
+		orr	r3, r3, r4, lsl #16
+
+		str	r3, [r1], #4
+
+.Lno_insw_2:	tst	r2, #1
+		ldrne	r3, [r0]
+		strneb	r3, [r1], #1
+		movne	r3, r3, lsr #8
+		strneb	r3, [r1]
+
+		ldmfd	sp!, {r4, r5, r6, pc}
+
+
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
new file mode 100644
index 0000000..49b8004
--- /dev/null
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -0,0 +1,126 @@
+/*
+ *  linux/arch/arm/lib/io-writesw-armv3.S
+ *
+ *  Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Loutsw_bad_alignment:
+		adr	r0, .Loutsw_bad_align_msg
+		mov	r2, lr
+		b	panic
+.Loutsw_bad_align_msg:
+		.asciz	"outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+		.align
+
+.Loutsw_align:	tst	r1, #1
+		bne	.Loutsw_bad_alignment
+
+		add	r1, r1, #2
+
+		ldr	r3, [r1, #-4]
+		mov	r3, r3, lsr #16
+		orr	r3, r3, r3, lsl #16
+		str	r3, [r0]
+		subs	r2, r2, #1
+		moveq	pc, lr
+
+ENTRY(__raw_writesw)
+		teq	r2, #0		@ do we have to check for the zero len?
+		moveq	pc, lr
+		tst	r1, #3
+		bne	.Loutsw_align
+
+		stmfd	sp!, {r4, r5, r6, lr}
+
+		subs	r2, r2, #8
+		bmi	.Lno_outsw_8
+
+.Loutsw_8_lp:	ldmia	r1!, {r3, r4, r5, r6}
+
+		mov	ip, r3, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r3, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+		mov	ip, r4, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r4, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+		mov	ip, r5, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r5, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+		mov	ip, r6, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r6, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+		subs	r2, r2, #8
+		bpl	.Loutsw_8_lp
+
+		tst	r2, #7
+		ldmeqfd	sp!, {r4, r5, r6, pc}
+
+.Lno_outsw_8:	tst	r2, #4
+		beq	.Lno_outsw_4
+
+		ldmia	r1!, {r3, r4}
+
+		mov	ip, r3, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r3, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+		mov	ip, r4, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r4, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+.Lno_outsw_4:	tst	r2, #2
+		beq	.Lno_outsw_2
+
+		ldr	r3, [r1], #4
+
+		mov	ip, r3, lsl #16
+		orr	ip, ip, ip, lsr #16
+		str	ip, [r0]
+
+		mov	ip, r3, lsr #16
+		orr	ip, ip, ip, lsl #16
+		str	ip, [r0]
+
+.Lno_outsw_2:	tst	r2, #1
+
+		ldrne	r3, [r1]
+
+		movne	ip, r3, lsl #16
+		orrne	ip, ip, ip, lsr #16
+		strne	ip, [r0]
+
+		ldmfd	sp!, {r4, r5, r6, pc}
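The repeated shift/orr pairs in these two ARMv3 files implement one idiom: the 16-bit port is accessed with 32-bit loads and stores, so the read path packs two successive halfword reads into one 32-bit word before storing it to the buffer, and the write path replicates each halfword into both halves of the word written to the port (presumably so the device sees the data on whichever byte lanes it samples). The write-side transform, as a C sketch for illustration only:

	/* Equivalent of "mov ip, rN, lsl #16; orr ip, ip, ip, lsr #16":
	 * duplicate a 16-bit value into both halves of a 32-bit word. */
	static inline unsigned int replicate_halfword(unsigned short v)
	{
		return ((unsigned int)v << 16) | v;
	}
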
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 0000000..5c908b1
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,564 @@
+/*
+ *  linux/arch/arm/lib/uaccess.S
+ *
+ *  Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Routines to block copy data to/from user memory
+ *   These are highly optimised both for the 4k page size
+ *   and for various alignments.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/domain.h>
+
+		.text
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
+ * Purpose  : copy a block to user memory from kernel memory
+ * Params   : to   - user memory
+ *          : from - kernel memory
+ *          : n    - number of bytes to copy
+ * Returns  : Number of bytes NOT copied.
+ */
+
+.Lc2u_dest_not_aligned:
+		rsb	ip, ip, #4
+		cmp	ip, #2
+		ldrb	r3, [r1], #1
+USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #1
+USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
+		sub	r2, r2, ip
+		b	.Lc2u_dest_aligned
+
+ENTRY(__copy_to_user)
+		stmfd	sp!, {r2, r4 - r7, lr}
+		cmp	r2, #4
+		blt	.Lc2u_not_enough
+		ands	ip, r0, #3
+		bne	.Lc2u_dest_not_aligned
+.Lc2u_dest_aligned:
+
+		ands	ip, r1, #3
+		bne	.Lc2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lc2u_0fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lc2u_0nowords
+		ldr	r3, [r1], #4
+USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lc2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #32
+		blt	.Lc2u_0rem8lp
+
+.Lc2u_0cpy8lp:	ldmia	r1!, {r3 - r6}
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		ldmia	r1!, {r3 - r6}
+		subs	ip, ip, #32
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.Lc2u_0cpy8lp
+
+.Lc2u_0rem8lp:	cmn	ip, #16
+		ldmgeia	r1!, {r3 - r6}
+		stmgeia	r0!, {r3 - r6}			@ Shouldnt fault
+		tst	ip, #8
+		ldmneia	r1!, {r3 - r4}
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		ldrne	r3, [r1], #4
+	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.Lc2u_0fupi
+.Lc2u_0nowords:	teq	ip, #0
+		beq	.Lc2u_finished
+.Lc2u_nowords:	cmp	ip, #2
+		ldrb	r3, [r1], #1
+USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #1
+USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
+		b	.Lc2u_finished
+
+.Lc2u_not_enough:
+		movs	ip, r2
+		bne	.Lc2u_nowords
+.Lc2u_finished:	mov	r0, #0
+		ldmfd	sp!, {r2, r4 - r7, pc}
+
+.Lc2u_src_not_aligned:
+		bic	r1, r1, #3
+		ldr	r7, [r1], #4
+		cmp	ip, #2
+		bgt	.Lc2u_3fupi
+		beq	.Lc2u_2fupi
+.Lc2u_1fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lc2u_1nowords
+		mov	r3, r7, pull #8
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #24
+USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lc2u_1fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lc2u_1rem8lp
+
+.Lc2u_1cpy8lp:	mov	r3, r7, pull #8
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #24
+		mov	r4, r4, pull #8
+		orr	r4, r4, r5, push #24
+		mov	r5, r5, pull #8
+		orr	r5, r5, r6, push #24
+		mov	r6, r6, pull #8
+		orr	r6, r6, r7, push #24
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.Lc2u_1cpy8lp
+
+.Lc2u_1rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #8
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #24
+		movne	r4, r4, pull #8
+		orrne	r4, r4, r7, push #24
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #8
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #24
+	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.Lc2u_1fupi
+.Lc2u_1nowords:	mov	r3, r7, get_byte_1
+		teq	ip, #0
+		beq	.Lc2u_finished
+		cmp	ip, #2
+USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
+		movge	r3, r7, get_byte_2
+USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
+		movgt	r3, r7, get_byte_3
+USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
+		b	.Lc2u_finished
+
+.Lc2u_2fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lc2u_2nowords
+		mov	r3, r7, pull #16
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #16
+USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lc2u_2fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lc2u_2rem8lp
+
+.Lc2u_2cpy8lp:	mov	r3, r7, pull #16
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #16
+		mov	r4, r4, pull #16
+		orr	r4, r4, r5, push #16
+		mov	r5, r5, pull #16
+		orr	r5, r5, r6, push #16
+		mov	r6, r6, pull #16
+		orr	r6, r6, r7, push #16
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.Lc2u_2cpy8lp
+
+.Lc2u_2rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #16
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #16
+		movne	r4, r4, pull #16
+		orrne	r4, r4, r7, push #16
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #16
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #16
+	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.Lc2u_2fupi
+.Lc2u_2nowords:	mov	r3, r7, get_byte_2
+		teq	ip, #0
+		beq	.Lc2u_finished
+		cmp	ip, #2
+USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
+		movge	r3, r7, get_byte_3
+USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #0
+USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
+		b	.Lc2u_finished
+
+.Lc2u_3fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lc2u_3nowords
+		mov	r3, r7, pull #24
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #8
+USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lc2u_3fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lc2u_3rem8lp
+
+.Lc2u_3cpy8lp:	mov	r3, r7, pull #24
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #8
+		mov	r4, r4, pull #24
+		orr	r4, r4, r5, push #8
+		mov	r5, r5, pull #24
+		orr	r5, r5, r6, push #8
+		mov	r6, r6, pull #24
+		orr	r6, r6, r7, push #8
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.Lc2u_3cpy8lp
+
+.Lc2u_3rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #24
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #8
+		movne	r4, r4, pull #24
+		orrne	r4, r4, r7, push #8
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #24
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #8
+	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.Lc2u_3fupi
+.Lc2u_3nowords:	mov	r3, r7, get_byte_3
+		teq	ip, #0
+		beq	.Lc2u_finished
+		cmp	ip, #2
+USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #0
+USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
+		b	.Lc2u_finished
+ENDPROC(__copy_to_user)
+
+		.pushsection .fixup,"ax"
+		.align	0
+9001:		ldmfd	sp!, {r0, r4 - r7, pc}
+		.popsection
+
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose  : copy a block from user memory to kernel memory
+ * Params   : to   - kernel memory
+ *          : from - user memory
+ *          : n    - number of bytes to copy
+ * Returns  : Number of bytes NOT copied.
+ */
+.Lcfu_dest_not_aligned:
+		rsb	ip, ip, #4
+		cmp	ip, #2
+USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault
+		strb	r3, [r0], #1
+USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		sub	r2, r2, ip
+		b	.Lcfu_dest_aligned
+
+ENTRY(__copy_from_user)
+		stmfd	sp!, {r0, r2, r4 - r7, lr}
+		cmp	r2, #4
+		blt	.Lcfu_not_enough
+		ands	ip, r0, #3
+		bne	.Lcfu_dest_not_aligned
+.Lcfu_dest_aligned:
+		ands	ip, r1, #3
+		bne	.Lcfu_src_not_aligned
+
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lcfu_0fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lcfu_0nowords
+USER(	TUSER(	ldr)	r3, [r1], #4)
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lcfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "ldrt" insn
+ */
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #32
+		blt	.Lcfu_0rem8lp
+
+.Lcfu_0cpy8lp:	ldmia	r1!, {r3 - r6}			@ Shouldnt fault
+		stmia	r0!, {r3 - r6}
+		ldmia	r1!, {r3 - r6}			@ Shouldnt fault
+		subs	ip, ip, #32
+		stmia	r0!, {r3 - r6}
+		bpl	.Lcfu_0cpy8lp
+
+.Lcfu_0rem8lp:	cmn	ip, #16
+		ldmgeia	r1!, {r3 - r6}			@ Shouldnt fault
+		stmgeia	r0!, {r3 - r6}
+		tst	ip, #8
+		ldmneia	r1!, {r3 - r4}			@ Shouldnt fault
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+	TUSER(	ldrne) r3, [r1], #4			@ Shouldnt fault
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.Lcfu_0fupi
+.Lcfu_0nowords:	teq	ip, #0
+		beq	.Lcfu_finished
+.Lcfu_nowords:	cmp	ip, #2
+USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault
+		strb	r3, [r0], #1
+USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.Lcfu_finished
+
+.Lcfu_not_enough:
+		movs	ip, r2
+		bne	.Lcfu_nowords
+.Lcfu_finished:	mov	r0, #0
+		add	sp, sp, #8
+		ldmfd	sp!, {r4 - r7, pc}
+
+.Lcfu_src_not_aligned:
+		bic	r1, r1, #3
+USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
+		cmp	ip, #2
+		bgt	.Lcfu_3fupi
+		beq	.Lcfu_2fupi
+.Lcfu_1fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lcfu_1nowords
+		mov	r3, r7, pull #8
+USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #24
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lcfu_1fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lcfu_1rem8lp
+
+.Lcfu_1cpy8lp:	mov	r3, r7, pull #8
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #24
+		mov	r4, r4, pull #8
+		orr	r4, r4, r5, push #24
+		mov	r5, r5, pull #8
+		orr	r5, r5, r6, push #24
+		mov	r6, r6, pull #8
+		orr	r6, r6, r7, push #24
+		stmia	r0!, {r3 - r6}
+		bpl	.Lcfu_1cpy8lp
+
+.Lcfu_1rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #8
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #24
+		movne	r4, r4, pull #8
+		orrne	r4, r4, r7, push #24
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #8
+USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #24
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.Lcfu_1fupi
+.Lcfu_1nowords:	mov	r3, r7, get_byte_1
+		teq	ip, #0
+		beq	.Lcfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+		movge	r3, r7, get_byte_2
+		strgeb	r3, [r0], #1
+		movgt	r3, r7, get_byte_3
+		strgtb	r3, [r0], #1
+		b	.Lcfu_finished
+
+.Lcfu_2fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lcfu_2nowords
+		mov	r3, r7, pull #16
+USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #16
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lcfu_2fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lcfu_2rem8lp
+
+
+.Lcfu_2cpy8lp:	mov	r3, r7, pull #16
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #16
+		mov	r4, r4, pull #16
+		orr	r4, r4, r5, push #16
+		mov	r5, r5, pull #16
+		orr	r5, r5, r6, push #16
+		mov	r6, r6, pull #16
+		orr	r6, r6, r7, push #16
+		stmia	r0!, {r3 - r6}
+		bpl	.Lcfu_2cpy8lp
+
+.Lcfu_2rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #16
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #16
+		movne	r4, r4, pull #16
+		orrne	r4, r4, r7, push #16
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #16
+USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #16
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.Lcfu_2fupi
+.Lcfu_2nowords:	mov	r3, r7, get_byte_2
+		teq	ip, #0
+		beq	.Lcfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+		movge	r3, r7, get_byte_3
+		strgeb	r3, [r0], #1
+USER(	TUSER(	ldrgtb) r3, [r1], #0)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.Lcfu_finished
+
+.Lcfu_3fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.Lcfu_3nowords
+		mov	r3, r7, pull #24
+USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #8
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.Lcfu_3fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.Lcfu_3rem8lp
+
+.Lcfu_3cpy8lp:	mov	r3, r7, pull #24
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		orr	r3, r3, r4, push #8
+		mov	r4, r4, pull #24
+		orr	r4, r4, r5, push #8
+		mov	r5, r5, pull #24
+		orr	r5, r5, r6, push #8
+		mov	r6, r6, pull #24
+		orr	r6, r6, r7, push #8
+		stmia	r0!, {r3 - r6}
+		subs	ip, ip, #16
+		bpl	.Lcfu_3cpy8lp
+
+.Lcfu_3rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #24
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #8
+		movne	r4, r4, pull #24
+		orrne	r4, r4, r7, push #8
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #24
+USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #8
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.Lcfu_3fupi
+.Lcfu_3nowords:	mov	r3, r7, get_byte_3
+		teq	ip, #0
+		beq	.Lcfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.Lcfu_finished
+ENDPROC(__copy_from_user)
+
+		.pushsection .fixup,"ax"
+		.align	0
+		/*
+		 * We took an exception.  r0 contains a pointer to
+		 * the byte not copied.
+		 */
+9001:		ldr	r2, [sp], #4			@ void *to
+		sub	r2, r0, r2			@ bytes copied
+		ldr	r1, [sp], #4			@ unsigned long count
+		subs	r4, r1, r2			@ bytes left to copy
+		movne	r1, r4
+		blne	__memzero
+		mov	r0, r4
+		ldmfd	sp!, {r4 - r7, pc}
+		.popsection
+
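(For context on the .fixup handler at 9001 above: on a faulting user access it works out how many bytes were actually copied, zero-fills the remainder of the kernel buffer via __memzero, and returns the number of bytes left uncopied.  A minimal C sketch of the caller-visible contract follows; the helper name and error handling are illustrative only -- copy_from_user() itself is the real interface.)

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Illustrative helper: shows the semantics the fixup path guarantees. */
	static int read_user_buf(void *dst, const void __user *src, size_t len)
	{
		unsigned long left = copy_from_user(dst, src, len);

		/*
		 * 'left' is the number of bytes NOT copied; on this path the
		 * uncopied tail of 'dst' has already been zeroed, so a short
		 * copy never exposes stale kernel data.
		 */
		return left ? -EFAULT : 0;
	}
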
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 5de69f2..f6b9fc7 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -162,38 +162,6 @@
 	dm644x_init();
 }
 
-/*
- I2C initialization
-*/
-static struct davinci_i2c_platform_data ntosd2_i2c_pdata = {
-	.bus_freq	= 20 /* kHz */,
-	.bus_delay	= 100 /* usec */,
-};
-
-static struct i2c_board_info __initdata ntosd2_i2c_info[] =  {
-};
-
-static	int ntosd2_init_i2c(void)
-{
-	int	status;
-
-	davinci_init_i2c(&ntosd2_i2c_pdata);
-	status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type);
-	if (status == 0) {
-		status = gpio_direction_input(NTOSD2_MSP430_IRQ);
-		if (status == 0) {
-			status = gpio_to_irq(NTOSD2_MSP430_IRQ);
-			if (status > 0) {
-				ntosd2_i2c_info[0].irq = status;
-				i2c_register_board_info(1,
-					ntosd2_i2c_info,
-					ARRAY_SIZE(ntosd2_i2c_info));
-			}
-		}
-	}
-	return status;
-}
-
 static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
 	.wires		= 4,
 	.version	= MMC_CTLR_VERSION_1
@@ -218,7 +186,6 @@
 {
 	struct clk *aemif_clk;
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
-	int	status;
 
 	aemif_clk = clk_get(NULL, "aemif");
 	clk_enable(aemif_clk);
@@ -242,12 +209,6 @@
 	platform_add_devices(davinci_ntosd2_devices,
 				ARRAY_SIZE(davinci_ntosd2_devices));
 
-	/* Initialize I2C interface specific for this board */
-	status = ntosd2_init_i2c();
-	if (status < 0)
-		pr_warning("davinci_ntosd2_init: msp430 irq setup failed:"
-						"	 %d\n", status);
-
 	davinci_serial_init(&uart_config);
 	dm644x_init_asp(&dm644x_ntosd2_snd_data);
 
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 373c3c0..c0bc83a 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -115,7 +115,7 @@
 }
 #endif /* CONFIG_OF */
 
-static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
+static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
 						struct exynos_pm_domain *pd)
 {
 	if (pdev->dev.bus) {
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 7aa6313..f69ca46 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -223,7 +223,7 @@
 	clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
-	clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+	clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0");
 	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
 	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
 	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
@@ -250,8 +250,10 @@
 	clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
 	clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
 	clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
-	clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
-	clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+	clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0");
+	clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0");
+	clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0");
+	clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0");
 	clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
 	clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
 	clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index 8e19e70..1253af2 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -130,7 +130,7 @@
 	clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
 	clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
-	clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+	clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
 	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
 	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
 	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index f608669..4bdcaa9 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -303,6 +303,7 @@
 	clk_prepare_enable(clk[aips_tz2]); /* fec */
 	clk_prepare_enable(clk[spba]);
 	clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+	clk_prepare_enable(clk[emi_slow_gate]); /* eim */
 	clk_prepare_enable(clk[tmax1]);
 	clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
 	clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index ebf680b..3fa6c51 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 7b1055c..3b22675 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -456,7 +456,7 @@
 
 	clk = clk_get_sys("ap_timer", NULL);
 	BUG_ON(IS_ERR(clk));
-	clk_enable(clk);
+	clk_prepare_enable(clk);
 	rate = clk_get_rate(clk);
 
 	writel(0, TIMER0_VA_BASE + TIMER_CTRL);
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index 2a576ab..a571755 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -9,5 +9,5 @@
 dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
 dtb-$(CONFIG_MACH_TS219_DT)	+= kirkwood-qnap-ts219.dtb
 dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
-dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
-dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index ccdf83b..9a8bbda 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -2,9 +2,6 @@
 
 source "arch/arm/mach-mxs/devices/Kconfig"
 
-config MXS_OCOTP
-	bool
-
 config SOC_IMX23
 	bool
 	select ARM_AMBA
@@ -66,7 +63,6 @@
 	select MXS_HAVE_PLATFORM_MXS_SAIF
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_RTC_STMP3XXX
-	select MXS_OCOTP
 	help
 	  Include support for MX28EVK platform. This includes specific
 	  configurations for the board and its peripherals.
@@ -94,7 +90,6 @@
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_MXS_MMC
 	select MXS_HAVE_PLATFORM_MXSFB
-	select MXS_OCOTP
 
 config MODULE_APX4
 	bool
@@ -106,7 +101,6 @@
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_MXS_MMC
 	select MXS_HAVE_PLATFORM_MXS_SAIF
-	select MXS_OCOTP
 
 config MACH_TX28
 	bool "Ka-Ro TX28 module"
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index e41590c..fed3695 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,7 +1,6 @@
 # Common support
-obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o
 
-obj-$(CONFIG_MXS_OCOTP) += ocotp.o
 obj-$(CONFIG_PM) += pm.o
 
 obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 5905ed1..d89d87ae 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -953,12 +953,12 @@
 
 static struct eeti_ts_platform_data eeti_ts_pdata = {
 	.irq_active_high = 1,
+	.irq_gpio = GPIO_TOUCH_IRQ,
 };
 
 static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
 	.type	= "eeti_ts",
 	.addr	= 0x0a,
-	.irq	= PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
 	.platform_data = &eeti_ts_pdata,
 };
 
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index e249611..d56b0f7 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -483,7 +483,7 @@
 	select I2C
 	select POWER_SUPPLY
 	select MACH_NEO1973
-	select S3C2410_PWM
+	select S3C24XX_PWM
 	select S3C_DEV_USB_HOST
 	help
 	   Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
@@ -493,7 +493,7 @@
 	select S3C24XX_DCLK
 	select PM_H1940 if PM
 	select I2C
-	select S3C2410_PWM
+	select S3C24XX_PWM
 	select S3C_DEV_NAND
 	select S3C2410_IOTIMING if S3C2440_CPUFREQ
 	select S3C2440_XTAL_16934400
diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c
index 6a23524..f8e4723 100644
--- a/arch/arm/mach-sa1100/leds-hackkit.c
+++ b/arch/arm/mach-sa1100/leds-hackkit.c
@@ -10,6 +10,7 @@
  * as cpu led, the green one is used as timer led.
  */
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 #include <asm/leds.h>
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index 8fd387b..b7344be 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -51,7 +51,7 @@
 	.consumer_supplies = tps658621_ldo0_supply,
 };
 
-#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv)	\
+#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\
 	static struct regulator_init_data _id##_data = {		\
 		.supply_regulator = _supply,				\
 		.constraints = {					\
@@ -63,21 +63,22 @@
 			.valid_ops_mask = (REGULATOR_CHANGE_MODE |	\
 					   REGULATOR_CHANGE_STATUS |	\
 					   REGULATOR_CHANGE_VOLTAGE),	\
+			.always_on = _on,				\
 		},							\
 	}
 
-HARMONY_REGULATOR_INIT(sm0,  "vdd_sm0",  "vdd_sys", 725, 1500);
-HARMONY_REGULATOR_INIT(sm1,  "vdd_sm1",  "vdd_sys", 725, 1500);
-HARMONY_REGULATOR_INIT(sm2,  "vdd_sm2",  "vdd_sys", 3000, 4550);
-HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500);
-HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500);
-HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475);
-HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL,	    1250, 3300);
-HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300);
+HARMONY_REGULATOR_INIT(sm0,  "vdd_sm0",  "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm1,  "vdd_sm1",  "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm2,  "vdd_sm2",  "vdd_sys", 3000, 4550, 1);
+HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0);
+HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1);
+HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL,	    1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1);
 
 #define TPS_REG(_id, _data)			\
 	{					\
@@ -119,9 +120,10 @@
 
 int __init harmony_regulator_init(void)
 {
+	regulator_register_always_on(0, "vdd_sys",
+		NULL, 0, 5000000);
+
 	if (machine_is_harmony()) {
-		regulator_register_always_on(0, "vdd_sys",
-			NULL, 0, 5000000);
 		i2c_register_board_info(3, harmony_regulators, 1);
 	} else { /* Harmony, booted using device tree */
 		struct device_node *np;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c2cdf65..4e7d118 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
-			return;
+			continue;
 
 		map.pfn = __phys_to_pfn(start);
 		map.virtual = __phys_to_virt(start);
@@ -423,7 +423,7 @@
 	unsigned int pageno;
 	unsigned long flags;
 	void *ptr = NULL;
-	size_t align;
+	unsigned long align_mask;
 
 	if (!pool->vaddr) {
 		WARN(1, "coherent pool not initialised!\n");
@@ -435,11 +435,11 @@
 	 * small, so align them to their order in pages, minimum is a page
 	 * size. This helps reduce fragmentation of the DMA space.
 	 */
-	align = PAGE_SIZE << get_order(size);
+	align_mask = (1 << get_order(size)) - 1;
 
 	spin_lock_irqsave(&pool->lock, flags);
 	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, (1 << align) - 1);
+					    0, count, align_mask);
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
@@ -648,12 +648,12 @@
 
 	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
+	} else if (__free_from_pool(cpu_addr, size)) {
+		return;
 	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
-		if (__free_from_pool(cpu_addr, size))
-			return;
 		/*
 		 * Non-atomic allocations cannot be freed with IRQs disabled
 		 */
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7745854..40ca11e 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -231,8 +231,6 @@
 	struct page *page;
 	struct address_space *mapping;
 
-	if (!pte_present_user(pteval))
-		return;
 	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
 		/* only flush non-aliasing VIPT caches for exec mappings */
 		return;
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index c202113..ea94765 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,10 +38,10 @@
 	dsb
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
-#ifdef CONFIG_ARM_ERRATA_720789
-	mov	r3, #0
-#else
 	asid	r3, r3				@ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+	ALT_SMP(W(mov)	r3, #0	)
+	ALT_UP(W(nop)		)
 #endif
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 7aca31c..9c3b90c 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -403,7 +403,8 @@
 
 config S3C24XX_PWM
 	bool "PWM device support"
-	select HAVE_PWM
+	select PWM
+	select PWM_SAMSUNG
 	help
 	  Support for exporting the PWM timer blocks via the pwm device
 	  system
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index fb849d0..c834b32 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -719,8 +719,10 @@
 			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
 				elf_hwcap |= HWCAP_NEON;
 #endif
+#ifdef CONFIG_VFPv3
 			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
 				elf_hwcap |= HWCAP_VFPv4;
+#endif
 		}
 	}
 	return 0;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ada8f0f..fb96e60 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,7 +52,6 @@
 #ifdef CONFIG_MTD_UCLINUX
 extern struct map_info uclinux_ram_map;
 unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
-unsigned long _ebss;
 EXPORT_SYMBOL(memory_mtd_end);
 EXPORT_SYMBOL(memory_mtd_start);
 EXPORT_SYMBOL(mtd_size);
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 052f81a..983c859 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,6 +6,7 @@
 config C6X
 	def_bool y
 	select CLKDEV_LOOKUP
+	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 6d521d9..09c5a0f 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -1,7 +1,7 @@
 /*
  *  Port on Texas Instruments TMS320C6x architecture
  *
- *  Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ *  Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated
  *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -16,9 +16,14 @@
 /*
  * Cache line size
  */
-#define L1D_CACHE_BYTES   64
-#define L1P_CACHE_BYTES   32
-#define L2_CACHE_BYTES	  128
+#define L1D_CACHE_SHIFT   6
+#define L1D_CACHE_BYTES   (1 << L1D_CACHE_SHIFT)
+
+#define L1P_CACHE_SHIFT   5
+#define L1P_CACHE_BYTES   (1 << L1P_CACHE_SHIFT)
+
+#define L2_CACHE_SHIFT    7
+#define L2_CACHE_BYTES    (1 << L2_CACHE_SHIFT)
 
 /*
  * L2 used as cache
@@ -29,7 +34,8 @@
  * For practical reasons the L1_CACHE_BYTES defines should not be smaller than
  * the L2 line size
  */
-#define L1_CACHE_BYTES        L2_CACHE_BYTES
+#define L1_CACHE_SHIFT        L2_CACHE_SHIFT
+#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
 
 #define L2_CACHE_ALIGN_LOW(x) \
 	(((x) & ~(L2_CACHE_BYTES - 1)))
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 954d81e..7913695 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,5 +234,4 @@
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
-CONFIG_MISC_DEVICES=y
 CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 91c41ec..f8e9133 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -209,4 +209,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_CRYPTO_MD5=y
-CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6f38b61..44057885 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@
 	srat_num_cpus++;
 }
 
-void __init
+int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	unsigned long paddr, size;
@@ -512,7 +512,7 @@
 
 	/* Ignore disabled entries */
 	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
-		return;
+		return -1;
 
 	/* record this node in proximity bitmap */
 	pxm_bit_set(pxm);
@@ -531,6 +531,7 @@
 	p->size = size;
 	p->nid = pxm;
 	num_node_memblks++;
+	return 0;
 }
 
 void __init acpi_numa_arch_fixup(void)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0b0f8b8..b22df94 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@
 	select HAVE_AOUT if MMU
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_SHOW
+	select GENERIC_ATOMIC64
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 	select GENERIC_CPU_DEVICES
 	select GENERIC_STRNCPY_FROM_USER if MMU
@@ -54,18 +55,6 @@
 	bool
 	default y
 
-config CPU_HAS_NO_BITFIELDS
-	bool
-
-config CPU_HAS_NO_MULDIV64
-	bool
-
-config CPU_HAS_ADDRESS_SPACES
-	bool
-
-config FPU
-	bool
-
 config HZ
 	int
 	default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 43a9f8f..c4eb79e 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,6 +28,7 @@
 	select CPU_HAS_NO_BITFIELDS
 	select CPU_HAS_NO_MULDIV64
 	select GENERIC_CSUM
+	select HAVE_CLK
 
 endchoice
 
@@ -37,6 +38,7 @@
 	bool
 	select CPU_HAS_NO_BITFIELDS
 	select CPU_HAS_NO_MULDIV64
+	select CPU_HAS_NO_UNALIGNED
 	select GENERIC_CSUM
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -48,6 +50,7 @@
 config MCPU32
 	bool
 	select CPU_HAS_NO_BITFIELDS
+	select CPU_HAS_NO_UNALIGNED
 	help
 	  The Freescale (was then Motorola) CPU32 is a CPU core that is
 	  based on the 68020 processor. For the most part it is used in
@@ -56,7 +59,6 @@
 config M68020
 	bool "68020 support"
 	depends on MMU
-	select GENERIC_ATOMIC64
 	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68020
@@ -67,7 +69,6 @@
 config M68030
 	bool "68030 support"
 	depends on MMU && !MMU_SUN3
-	select GENERIC_ATOMIC64
 	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68030
@@ -77,7 +78,6 @@
 config M68040
 	bool "68040 support"
 	depends on MMU && !MMU_SUN3
-	select GENERIC_ATOMIC64
 	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68LC040
@@ -88,7 +88,6 @@
 config M68060
 	bool "68060 support"
 	depends on MMU && !MMU_SUN3
-	select GENERIC_ATOMIC64
 	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68060
@@ -376,6 +375,18 @@
 	default "3"
 	depends on !SINGLE_MEMORY_CHUNK
 
+config CPU_HAS_NO_BITFIELDS
+	bool
+
+config CPU_HAS_NO_MULDIV64
+	bool
+
+config CPU_HAS_NO_UNALIGNED
+	bool
+
+config CPU_HAS_ADDRESS_SPACES
+	bool
+
 config FPU
 	bool
 
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 0a30406..f5565d6 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@
 
 	timer_handler(irq, dev_id);
 
-	x=*(volatile unsigned char *)(timer+3);
-	x=*(volatile unsigned char *)(timer+5);
+	x = *(volatile unsigned char *)(apollo_timer + 3);
+	x = *(volatile unsigned char *)(apollo_timer + 5);
 
 	return IRQ_HANDLED;
 }
@@ -186,17 +186,17 @@
 void dn_sched_init(irq_handler_t timer_routine)
 {
 	/* program timer 1 */
-	*(volatile unsigned char *)(timer+3)=0x01;
-	*(volatile unsigned char *)(timer+1)=0x40;
-	*(volatile unsigned char *)(timer+5)=0x09;
-	*(volatile unsigned char *)(timer+7)=0xc4;
+	*(volatile unsigned char *)(apollo_timer + 3) = 0x01;
+	*(volatile unsigned char *)(apollo_timer + 1) = 0x40;
+	*(volatile unsigned char *)(apollo_timer + 5) = 0x09;
+	*(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
 
 	/* enable IRQ of PIC B */
 	*(volatile unsigned char *)(pica+1)&=(~8);
 
 #if 0
-	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
-	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
+	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
 #endif
 
 	if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eafa253..a74e5d9 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,29 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
 
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += futex.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mutex.h
+generic-y += percpu.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += topology.h
+generic-y += types.h
 generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h
deleted file mode 100644
index 6bb8f02..0000000
--- a/arch/m68k/include/asm/MC68332.h
+++ /dev/null
@@ -1,152 +0,0 @@
-
-/* include/asm-m68knommu/MC68332.h: '332 control registers
- *
- * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
- *
- */
-
-#ifndef _MC68332_H_
-#define _MC68332_H_
-
-#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
-#define WORD_REF(addr) (*((volatile unsigned short*)addr))
-
-#define PORTE_ADDR	0xfffa11
-#define PORTE	BYTE_REF(PORTE_ADDR)
-#define DDRE_ADDR	0xfffa15
-#define DDRE	BYTE_REF(DDRE_ADDR)
-#define PEPAR_ADDR	0xfffa17
-#define PEPAR	BYTE_REF(PEPAR_ADDR)
-
-#define PORTF_ADDR	0xfffa19
-#define PORTF	BYTE_REF(PORTF_ADDR)
-#define DDRF_ADDR	0xfffa1d
-#define DDRF	BYTE_REF(DDRF_ADDR)
-#define PFPAR_ADDR	0xfffa1f
-#define PFPAR	BYTE_REF(PFPAR_ADDR)
-
-#define PORTQS_ADDR	0xfffc15
-#define PORTQS	BYTE_REF(PORTQS_ADDR)
-#define DDRQS_ADDR	0xfffc17
-#define DDRQS	BYTE_REF(DDRQS_ADDR)
-#define PQSPAR_ADDR	0xfffc16
-#define PQSPAR	BYTE_REF(PQSPAR_ADDR)
-
-#define CSPAR0_ADDR 0xFFFA44
-#define CSPAR0 WORD_REF(CSPAR0_ADDR)
-#define CSPAR1_ADDR 0xFFFA46
-#define CSPAR1 WORD_REF(CSPAR1_ADDR)
-#define CSARBT_ADDR 0xFFFA48
-#define CSARBT WORD_REF(CSARBT_ADDR)
-#define CSOPBT_ADDR 0xFFFA4A
-#define CSOPBT WORD_REF(CSOPBT_ADDR)
-#define CSBAR0_ADDR 0xFFFA4C
-#define CSBAR0 WORD_REF(CSBAR0_ADDR)
-#define CSOR0_ADDR 0xFFFA4E
-#define CSOR0 WORD_REF(CSOR0_ADDR)
-#define CSBAR1_ADDR 0xFFFA50
-#define CSBAR1 WORD_REF(CSBAR1_ADDR)
-#define CSOR1_ADDR 0xFFFA52
-#define CSOR1 WORD_REF(CSOR1_ADDR)
-#define CSBAR2_ADDR 0xFFFA54
-#define CSBAR2 WORD_REF(CSBAR2_ADDR)
-#define CSOR2_ADDR 0xFFFA56
-#define CSOR2 WORD_REF(CSOR2_ADDR)
-#define CSBAR3_ADDR 0xFFFA58
-#define CSBAR3 WORD_REF(CSBAR3_ADDR)
-#define CSOR3_ADDR 0xFFFA5A
-#define CSOR3 WORD_REF(CSOR3_ADDR)
-#define CSBAR4_ADDR 0xFFFA5C
-#define CSBAR4 WORD_REF(CSBAR4_ADDR)
-#define CSOR4_ADDR 0xFFFA5E
-#define CSOR4 WORD_REF(CSOR4_ADDR)
-#define CSBAR5_ADDR 0xFFFA60
-#define CSBAR5 WORD_REF(CSBAR5_ADDR)
-#define CSOR5_ADDR 0xFFFA62
-#define CSOR5 WORD_REF(CSOR5_ADDR)
-#define CSBAR6_ADDR 0xFFFA64
-#define CSBAR6 WORD_REF(CSBAR6_ADDR)
-#define CSOR6_ADDR 0xFFFA66
-#define CSOR6 WORD_REF(CSOR6_ADDR)
-#define CSBAR7_ADDR 0xFFFA68
-#define CSBAR7 WORD_REF(CSBAR7_ADDR)
-#define CSOR7_ADDR 0xFFFA6A
-#define CSOR7 WORD_REF(CSOR7_ADDR)
-#define CSBAR8_ADDR 0xFFFA6C
-#define CSBAR8 WORD_REF(CSBAR8_ADDR)
-#define CSOR8_ADDR 0xFFFA6E
-#define CSOR8 WORD_REF(CSOR8_ADDR)
-#define CSBAR9_ADDR 0xFFFA70
-#define CSBAR9 WORD_REF(CSBAR9_ADDR)
-#define CSOR9_ADDR 0xFFFA72
-#define CSOR9 WORD_REF(CSOR9_ADDR)
-#define CSBAR10_ADDR 0xFFFA74
-#define CSBAR10 WORD_REF(CSBAR10_ADDR)
-#define CSOR10_ADDR 0xFFFA76
-#define CSOR10 WORD_REF(CSOR10_ADDR)
-
-#define CSOR_MODE_ASYNC	0x0000
-#define CSOR_MODE_SYNC	0x8000
-#define CSOR_MODE_MASK	0x8000
-#define CSOR_BYTE_DISABLE	0x0000
-#define CSOR_BYTE_UPPER		0x4000
-#define CSOR_BYTE_LOWER		0x2000
-#define CSOR_BYTE_BOTH		0x6000
-#define CSOR_BYTE_MASK		0x6000
-#define CSOR_RW_RSVD		0x0000
-#define CSOR_RW_READ		0x0800
-#define CSOR_RW_WRITE		0x1000
-#define CSOR_RW_BOTH		0x1800
-#define CSOR_RW_MASK		0x1800
-#define CSOR_STROBE_DS		0x0400
-#define CSOR_STROBE_AS		0x0000
-#define CSOR_STROBE_MASK	0x0400
-#define CSOR_DSACK_WAIT(x)	(wait << 6)
-#define CSOR_DSACK_FTERM	(14 << 6)
-#define CSOR_DSACK_EXTERNAL	(15 << 6)
-#define CSOR_DSACK_MASK		0x03c0
-#define CSOR_SPACE_CPU		0x0000
-#define CSOR_SPACE_USER		0x0010
-#define CSOR_SPACE_SU		0x0020
-#define CSOR_SPACE_BOTH		0x0030
-#define CSOR_SPACE_MASK		0x0030
-#define CSOR_IPL_ALL		0x0000
-#define CSOR_IPL_PRIORITY(x)	(x << 1)
-#define CSOR_IPL_MASK		0x000e
-#define CSOR_AVEC_ON		0x0001
-#define CSOR_AVEC_OFF		0x0000
-#define CSOR_AVEC_MASK		0x0001
-
-#define CSBAR_ADDR(x)		((addr >> 11) << 3) 
-#define CSBAR_ADDR_MASK		0xfff8
-#define CSBAR_BLKSIZE_2K	0x0000
-#define CSBAR_BLKSIZE_8K	0x0001
-#define CSBAR_BLKSIZE_16K	0x0002
-#define CSBAR_BLKSIZE_64K	0x0003
-#define CSBAR_BLKSIZE_128K	0x0004
-#define CSBAR_BLKSIZE_256K	0x0005
-#define CSBAR_BLKSIZE_512K	0x0006
-#define CSBAR_BLKSIZE_1M	0x0007
-#define CSBAR_BLKSIZE_MASK	0x0007
-
-#define CSPAR_DISC	0
-#define CSPAR_ALT	1
-#define CSPAR_CS8	2
-#define CSPAR_CS16	3
-#define CSPAR_MASK	3
-
-#define CSPAR0_CSBOOT(x) (x << 0)
-#define CSPAR0_CS0(x)	(x << 2)
-#define CSPAR0_CS1(x)	(x << 4)
-#define CSPAR0_CS2(x)	(x << 6)
-#define CSPAR0_CS3(x)	(x << 8)
-#define CSPAR0_CS4(x)	(x << 10)
-#define CSPAR0_CS5(x)	(x << 12)
-
-#define CSPAR1_CS6(x)	(x << 0)
-#define CSPAR1_CS7(x)	(x << 2)
-#define CSPAR1_CS8(x)	(x << 4)
-#define CSPAR1_CS9(x)	(x << 6)
-#define CSPAR1_CS10(x)	(x << 8)
-
-#endif
diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h
deleted file mode 100644
index 954adc8..0000000
--- a/arch/m68k/include/asm/apollodma.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_APOLLO_DMA_H
-#define _ASM_APOLLO_DMA_H
-
-#include <asm/apollohw.h>		/* need byte IO */
-#include <linux/spinlock.h>		/* And spinlocks */
-#include <linux/delay.h>
-
-
-#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
-#define dma_inb(addr)	   (*((volatile unsigned char *)(addr+IO_BASE)))
-
-/*
- * NOTES about DMA transfers:
- *
- *  controller 1: channels 0-3, byte operations, ports 00-1F
- *  controller 2: channels 4-7, word operations, ports C0-DF
- *
- *  - ALL registers are 8 bits only, regardless of transfer size
- *  - channel 4 is not used - cascades 1 into 2.
- *  - channels 0-3 are byte - addresses/counts are for physical bytes
- *  - channels 5-7 are word - addresses/counts are for physical words
- *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- *  - transfer count loaded to registers is 1 less than actual count
- *  - controller 2 offsets are all even (2x offsets for controller 1)
- *  - page registers for 5-7 don't use data bit 0, represent 128K pages
- *  - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- *  Address mapping for channels 0-3:
- *
- *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *   P7  ...  P0  A7 ... A0  A7 ... A0
- * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
- *
- *  Address mapping for channels 5-7:
- *
- *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
- *    |  ...  |   \   \   ... \  \  \  ... \  \
- *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
- *    |  ...  |     \   \   ... \  \  \  ... \
- *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
- * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS	8
-
-/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE	0x10C00	/* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE	0x10D00	/* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG		(IO_DMA1_BASE+0x08) /* command register (w) */
-#define DMA1_STAT_REG		(IO_DMA1_BASE+0x08) /* status register (r) */
-#define DMA1_REQ_REG            (IO_DMA1_BASE+0x09) /* request register (w) */
-#define DMA1_MASK_REG		(IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
-#define DMA1_MODE_REG		(IO_DMA1_BASE+0x0B) /* mode register (w) */
-#define DMA1_CLEAR_FF_REG	(IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG           (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
-#define DMA1_RESET_REG		(IO_DMA1_BASE+0x0D) /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG       (IO_DMA1_BASE+0x0E) /* Clear Mask */
-#define DMA1_MASK_ALL_REG       (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
-
-#define DMA2_CMD_REG		(IO_DMA2_BASE+0x10) /* command register (w) */
-#define DMA2_STAT_REG		(IO_DMA2_BASE+0x10) /* status register (r) */
-#define DMA2_REQ_REG            (IO_DMA2_BASE+0x12) /* request register (w) */
-#define DMA2_MASK_REG		(IO_DMA2_BASE+0x14) /* single-channel mask (w) */
-#define DMA2_MODE_REG		(IO_DMA2_BASE+0x16) /* mode register (w) */
-#define DMA2_CLEAR_FF_REG	(IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG           (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
-#define DMA2_RESET_REG		(IO_DMA2_BASE+0x1A) /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG       (IO_DMA2_BASE+0x1C) /* Clear Mask */
-#define DMA2_MASK_ALL_REG       (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
-
-#define DMA_ADDR_0              (IO_DMA1_BASE+0x00) /* DMA address registers */
-#define DMA_ADDR_1              (IO_DMA1_BASE+0x02)
-#define DMA_ADDR_2              (IO_DMA1_BASE+0x04)
-#define DMA_ADDR_3              (IO_DMA1_BASE+0x06)
-#define DMA_ADDR_4              (IO_DMA2_BASE+0x00)
-#define DMA_ADDR_5              (IO_DMA2_BASE+0x04)
-#define DMA_ADDR_6              (IO_DMA2_BASE+0x08)
-#define DMA_ADDR_7              (IO_DMA2_BASE+0x0C)
-
-#define DMA_CNT_0               (IO_DMA1_BASE+0x01)   /* DMA count registers */
-#define DMA_CNT_1               (IO_DMA1_BASE+0x03)
-#define DMA_CNT_2               (IO_DMA1_BASE+0x05)
-#define DMA_CNT_3               (IO_DMA1_BASE+0x07)
-#define DMA_CNT_4               (IO_DMA2_BASE+0x02)
-#define DMA_CNT_5               (IO_DMA2_BASE+0x06)
-#define DMA_CNT_6               (IO_DMA2_BASE+0x0A)
-#define DMA_CNT_7               (IO_DMA2_BASE+0x0E)
-
-#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT	0x10
-
-#define DMA_8BIT 0
-#define DMA_16BIT 1
-#define DMA_BUSMASTER 2
-
-extern spinlock_t  dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&dma_spin_lock, flags);
-	return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
-	spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-	if (dmanr<=3)
-		dma_outb(dmanr,  DMA1_MASK_REG);
-	else
-		dma_outb(dmanr & 3,  DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-	if (dmanr<=3)
-		dma_outb(dmanr | 4,  DMA1_MASK_REG);
-	else
-		dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-	if (dmanr<=3)
-		dma_outb(0,  DMA1_CLEAR_FF_REG);
-	else
-		dma_outb(0,  DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-	if (dmanr<=3)
-		dma_outb(mode | dmanr,  DMA1_MODE_REG);
-	else
-		dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
-}
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
-	if (dmanr <= 3)  {
-	    dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-            dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-	}  else  {
-	    dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-	    dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-	}
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-        count--;
-	if (dmanr <= 3)  {
-	    dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-	    dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-        } else {
-	    dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-	    dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-        }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
-					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
-	/* using short to get 16-bit wrap around */
-	unsigned short count;
-
-	count = 1 + dma_inb(io_port);
-	count += dma_inb(io_port) << 8;
-
-	return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);	/* release it again */
-
-/* These are in arch/m68k/apollo/dma.c: */
-extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
-extern void dma_unmap_page(unsigned short dma_addr);
-
-#endif /* _ASM_APOLLO_DMA_H */
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index a1373b9..635ef4f 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@
 #define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
 #define pica (IO_BASE + pica_physaddr)
 #define picb (IO_BASE + picb_physaddr)
-#define timer (IO_BASE + timer_physaddr)
+#define apollo_timer (IO_BASE + timer_physaddr)
 #define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
 
 #define isaIO2mem(x) (((((x) & 0x3f8)  << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0..0000000
--- a/arch/m68k/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h
deleted file mode 100644
index c79c5e8..0000000
--- a/arch/m68k/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M68K_CPUTIME_H
-#define __M68K_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M68K_CPUTIME_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 9c09bec..12d8fe4 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@
 extern void __bad_udelay(void);
 
 
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
 /*
  * The simpler m68k and ColdFire processors do not have a 32*32->64
  * multiply instruction. So we need to handle them a little differently.
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h
deleted file mode 100644
index d8f9872..0000000
--- a/arch/m68k/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c4..0000000
--- a/arch/m68k/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h
deleted file mode 100644
index 0d4e188..0000000
--- a/arch/m68k/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_ERRNO_H
-#define _M68K_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _M68K_ERRNO_H */
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index 6a332a9..0000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h
deleted file mode 100644
index b279fe0..0000000
--- a/arch/m68k/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51..0000000
--- a/arch/m68k/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b..0000000
--- a/arch/m68k/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b0..0000000
--- a/arch/m68k/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
deleted file mode 100644
index 3413cc1..0000000
--- a/arch/m68k/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_KMAP_TYPES_H
-#define __ASM_M68K_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif	/* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f..0000000
--- a/arch/m68k/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644
index 6c25926..0000000
--- a/arch/m68k/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_LOCAL_H
-#define _ASM_M68K_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* _ASM_M68K_LOCAL_H */
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
deleted file mode 100644
index 36c93b5..0000000
--- a/arch/m68k/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h
deleted file mode 100644
index 39a5c29..0000000
--- a/arch/m68k/include/asm/mac_mouse.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_MAC_MOUSE_H
-#define _ASM_MAC_MOUSE_H
-
-/*
- * linux/include/asm-m68k/mac_mouse.h
- * header file for Macintosh ADB mouse driver
- * 27-10-97 Michael Schmitz
- * copied from:
- * header file for Atari Mouse driver
- * by Robert de Vries (robert@and.nl) on 19Jul93
- */
-
-struct mouse_status {
-	char		buttons;
-	short		dx;
-	short		dy;
-	int		ready;
-	int		active;
-	wait_queue_head_t wait;
-	struct fasync_struct *fasyncptr;
-};
-
-#endif
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c..0000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************/
-
-/*
- *      mcfmbus.h -- Coldfire MBUS support defines.
- *
- *      (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de) 
- */
-
-/****************************************************************************/
-
-
-#ifndef mcfmbus_h
-#define mcfmbus_h
-
-
-#define MCFMBUS_BASE		0x280
-#define MCFMBUS_IRQ_VECTOR	0x19
-#define MCFMBUS_IRQ		0x1
-#define MCFMBUS_CLK		0x3f
-#define MCFMBUS_IRQ_LEVEL	0x07	/*IRQ Level 1*/
-#define MCFMBUS_ADDRESS		0x01
-
-
-/*
-*	Define the 5307 MBUS register set addresses
-*/
-
-#define MCFMBUS_MADR	0x00
-#define MCFMBUS_MFDR	0x04
-#define MCFMBUS_MBCR	0x08
-#define MCFMBUS_MBSR	0x0C
-#define MCFMBUS_MBDR	0x10
-
-
-#define MCFMBUS_MADR_ADDR(a)	(((a)&0x7F)<<0x01) /*Slave Address*/
-
-#define MCFMBUS_MFDR_MBC(a)	((a)&0x3F)	   /*M-Bus Clock*/
-
-/*
-*	Define bit flags in Control Register
-*/
-
-#define MCFMBUS_MBCR_MEN           (0x80)  /* M-Bus Enable                 */
-#define MCFMBUS_MBCR_MIEN          (0x40)  /* M-Bus Interrupt Enable       */
-#define MCFMBUS_MBCR_MSTA          (0x20)  /* Master/Slave Mode Select Bit */
-#define MCFMBUS_MBCR_MTX           (0x10)  /* Transmit/Rcv Mode Select Bit */
-#define MCFMBUS_MBCR_TXAK          (0x08)  /* Transmit Acknowledge Enable  */
-#define MCFMBUS_MBCR_RSTA          (0x04)  /* Repeat Start                 */
-
-/*
-*	Define bit flags in Status Register
-*/
-
-#define MCFMBUS_MBSR_MCF           (0x80)  /* Data Transfer Complete       */
-#define MCFMBUS_MBSR_MAAS          (0x40)  /* Addressed as a Slave         */
-#define MCFMBUS_MBSR_MBB           (0x20)  /* Bus Busy                     */
-#define MCFMBUS_MBSR_MAL           (0x10)  /* Arbitration Lost             */
-#define MCFMBUS_MBSR_SRW           (0x04)  /* Slave Transmit               */
-#define MCFMBUS_MBSR_MIF           (0x02)  /* M-Bus Interrupt              */
-#define MCFMBUS_MBSR_RXAK          (0x01)  /* No Acknowledge Received      */
-
-/*
-*	Define bit flags in DATA I/O Register
-*/
-
-#define MCFMBUS_MBDR_READ          (0x01)  /* 1=read 0=write MBUS */
-
-#define MBUSIOCSCLOCK		1
-#define MBUSIOCGCLOCK		2
-#define MBUSIOCSADDR			3
-#define MBUSIOCGADDR			4
-#define MBUSIOCSSLADDR			5
-#define MBUSIOCGSLADDR			6
-#define MBUSIOCSSUBADDR			7
-#define MBUSIOCGSUBADDR			8
-
-#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
deleted file mode 100644
index 8eebf89..0000000
--- a/arch/m68k/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h
deleted file mode 100644
index 458c1f7..0000000
--- a/arch/m68k/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h
deleted file mode 100644
index 0859d04..0000000
--- a/arch/m68k/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_PERCPU_H
-#define __ASM_M68K_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif	/* __ASM_M68K_PERCPU_H */
diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h
deleted file mode 100644
index e7d3501..0000000
--- a/arch/m68k/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_RESOURCE_H
-#define _M68K_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _M68K_RESOURCE_H */
diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h
deleted file mode 100644
index bfe3ba1..0000000
--- a/arch/m68k/include/asm/sbus.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * some sbus structures and macros to make usage of sbus drivers possible
- */
-
-#ifndef __M68K_SBUS_H
-#define __M68K_SBUS_H
-
-struct sbus_dev {
-	struct {
-		unsigned int which_io;
-		unsigned int phys_addr;
-	} reg_addrs[1];
-};
-
-/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
-/* No SBUS on the Sun3, kludge -- sam */
-
-static inline void _sbus_writeb(unsigned char val, unsigned long addr)
-{
-	*(volatile unsigned char *)addr = val;
-}
-
-static inline unsigned char _sbus_readb(unsigned long addr)
-{
-	return *(volatile unsigned char *)addr;
-}
-
-static inline void _sbus_writel(unsigned long val, unsigned long addr)
-{
-	*(volatile unsigned long *)addr = val;
-
-}
-
-extern inline unsigned long _sbus_readl(unsigned long addr)
-{
-	return *(volatile unsigned long *)addr;
-}
-
-
-#define sbus_readb(a) _sbus_readb((unsigned long)a)
-#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
-#define sbus_readl(a) _sbus_readl((unsigned long)a)
-#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
-
-#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
deleted file mode 100644
index 3125054..0000000
--- a/arch/m68k/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SCATTERLIST_H
-#define _M68K_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
deleted file mode 100644
index 5277e52..0000000
--- a/arch/m68k/include/asm/sections.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_M68K_SECTIONS_H
-#define _ASM_M68K_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-extern char _sbss[], _ebss[];
-
-#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h
deleted file mode 100644
index fa56ec8..0000000
--- a/arch/m68k/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _M68K_SHM_H
-#define _M68K_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
-   currently out in swap space (see also mm/swap.c):
-   bits 0-1 (PAGE_PRESENT) is  = 0
-   bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
-   bits 31..9 are used like this:
-   bits 15..9 (SHM_ID) the id of the shared memory segment
-   bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
-                    (actually only bits 25..16 get used since SHMMAX is so low)
-   bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
-   others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT	9
-#else
-#define SHM_ID_SHIFT	7
-#endif
-#define _SHM_ID_BITS	7
-#define SHM_ID_MASK	((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT	(SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS	15
-#define SHM_IDX_MASK	((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _M68K_SHM_H */
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
deleted file mode 100644
index 851d3d7..0000000
--- a/arch/m68k/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SIGINFO_H
-#define _M68K_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h
deleted file mode 100644
index 08d93f1..0000000
--- a/arch/m68k/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_STATFS_H
-#define _M68K_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _M68K_STATFS_H */
diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h
deleted file mode 100644
index ca173e9..0000000
--- a/arch/m68k/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_TOPOLOGY_H
-#define _ASM_M68K_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
deleted file mode 100644
index 89705ad..0000000
--- a/arch/m68k/include/asm/types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _M68K_TYPES_H
-#define _M68K_TYPES_H
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-#include <asm-generic/int-ll64.h>
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-
-#define BITS_PER_LONG 32
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_TYPES_H */
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index f4043ae..2b3ca0b 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
 #define _ASM_M68K_UNALIGNED_H
 
 
-#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
 #include <linux/unaligned/be_struct.h>
 #include <linux/unaligned/le_byteshift.h>
 #include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
 
 #else
 /*
- * The m68k can do unaligned accesses itself. 
+ * The m68k can do unaligned accesses itself.
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h
deleted file mode 100644
index c82eb12..0000000
--- a/arch/m68k/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 7dc186b..71fb299 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -218,13 +218,10 @@
 	printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
 #endif
 
-	pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
-		 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
-		 (int) &_sdata, (int) &_edata,
-		 (int) &_sbss, (int) &_ebss);
-	pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
-		 (int) &_ebss, (int) memory_start,
-		 (int) memory_start, (int) memory_end);
+	pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+		 _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
+	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+		 __bss_stop, memory_start, memory_start, memory_end);
 
 	/* Keep a copy of command line */
 	*cmdline_p = &command_line[0];
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8d..9a5932e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@
 			goto bad_access;
 		}
 
-		mem_value = *mem;
+		/*
+		 * No need to check for EFAULT; we know that the page is
+		 * present and writable.
+		 */
+		__get_user(mem_value, mem);
 		if (mem_value == oldval)
-			*mem = newval;
+			__put_user(newval, mem);
 
 		pte_unmap_unlock(pte, ptl);
 		up_read(&mm->mmap_sem);
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 40e02d9..06a763f 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,9 +78,7 @@
 		__init_end = .;
 	}
 
-	_sbss = .;
 	BSS_SECTION(0, 0, 0)
-	_ebss = .;
 
 	_end = .;
 
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 63407c8..d099359 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,7 @@
 
   RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
 
-  _sbss = .;
   BSS_SECTION(0, 0, 0)
-  _ebss = .;
 
   _edata = .;			/* End of data section */
 
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index ad0f46d..8080469 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,9 +44,7 @@
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
-  _sbss = .;
   BSS_SECTION(0, 0, 0)
-  _ebss = .;
 
   _end = . ;
 
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 79e928a..ee5f0b1 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
 
 #define SI_TYPE_SIZE 32
 #define __BITS4 (SI_TYPE_SIZE / 4)
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index f77f258..282f9de 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@
 		MLK_ROUNDUP(__init_begin, __init_end),
 		MLK_ROUNDUP(_stext, _etext),
 		MLK_ROUNDUP(_sdata, _edata),
-		MLK_ROUNDUP(_sbss, _ebss));
+		MLK_ROUNDUP(__bss_start, __bss_stop));
 }
 
 void __init mem_init(void)
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 345ec0d..688e366 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@
 	totalram_pages = free_all_bootmem();
 
 	codek = (_etext - _stext) >> 10;
-	datak = (_ebss - _sdata) >> 10;
+	datak = (__bss_stop - _sdata) >> 10;
 	initk = (__init_begin - __init_end) >> 10;
 
 	tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
index f632fdc..537d324 100644
--- a/arch/m68k/platform/68328/head-de2.S
+++ b/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@
  *	Move ROM filesystem above bss :-)
  */
 
-	moveal	#_sbss, %a0			/* romfs at the start of bss */
-	moveal	#_ebss, %a1			/* Set up destination  */
+	moveal	#__bss_start, %a0		/* romfs at the start of bss */
+	moveal	#__bss_stop, %a1		/* Set up destination  */
 	movel	%a0, %a2			/* Copy of bss start */
 
 	movel	8(%a0), %d1			/* Get size of ROMFS */
@@ -84,8 +84,8 @@
  * Initialize BSS segment to 0
  */
 
-	lea	_sbss, %a0
-	lea	_ebss, %a1
+	lea	__bss_start, %a0
+	lea	__bss_stop, %a1
 
 	/* Copy 0 to %a0 until %a0 == %a1 */
 2:	cmpal	%a0, %a1
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index 2ebfd64..45a9dad 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@
 	movel	#CONFIG_VECTORBASE, %d7
 	addl	#16, %d7
 	moveal	%d7, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_stop, %a1
 	lea	%a1@(512), %a2
 
 	DBG_PUTC('C')
@@ -138,8 +138,8 @@
 
 	DBG_PUTC('E')
 
-	moveal	#_sbss, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_start, %a0
+	moveal	#__bss_stop, %a1
 
 	/* Copy 0 to %a0 until %a0 == %a1 */
 L1:
@@ -150,7 +150,7 @@
 	DBG_PUTC('F')
 
 	/* Copy command line from end of bss to command line */
-	moveal	#_ebss, %a0
+	moveal	#__bss_stop, %a0
 	moveal	#command_line, %a1
 	lea	%a1@(512), %a2
 
@@ -165,7 +165,7 @@
 
 	movel	#_sdata, %d0	
 	movel	%d0, _rambase	
-	movel	#_ebss,	%d0
+	movel	#__bss_stop, %d0
 	movel	%d0, _ramstart
 
 	movel	%a4, %d0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
index 7f1aeea..5189ef9 100644
--- a/arch/m68k/platform/68328/head-ram.S
+++ b/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@
 	beq	pclp3
 #endif /* DEBUG */
 	moveal	#0x007ffff0, %ssp
-	moveal	#_sbss, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_start, %a0
+	moveal	#__bss_stop, %a1
 
 	/* Copy 0 to %a0 until %a0 >= %a1 */
 L1:
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index a5ff96d..3dff98b 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@
 	cmpal	%a1, %a2
 	bhi	1b
 
-	moveal	#_sbss, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_start, %a0
+	moveal	#__bss_stop, %a1
 	/* Copy 0 to %a0 until %a0 == %a1 */
 	
 1:
@@ -70,7 +70,7 @@
 
         movel   #_sdata, %d0    
         movel   %d0, _rambase        
-        movel   #_ebss, %d0
+        movel   #__bss_stop, %d0
         movel   %d0, _ramstart
 	movel	#RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
 	movel	%d0, _ramend
diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S
index 8eb94fb..acd2131 100644
--- a/arch/m68k/platform/68360/head-ram.S
+++ b/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@
 	cmp.l	#_edata, %a1
 	blt     LD1
 
-	moveal	#_sbss, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_start, %a0
+	moveal	#__bss_stop, %a1
 
 	/* Copy 0 to %a0 until %a0 == %a1 */
 L1:
@@ -234,7 +234,7 @@
 store_ram_size:
 	/* Set ram size information */
 	move.l	#_sdata, _rambase
-	move.l	#_ebss, _ramstart
+	move.l	#__bss_stop, _ramstart
 	move.l	#RAMEND, %d0
 	sub.l	#0x1000, %d0			/* Reserve 4K for stack space.*/
 	move.l	%d0, _ramend			/* Different from RAMEND.*/
diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S
index 97510e5..dfc756d 100644
--- a/arch/m68k/platform/68360/head-rom.S
+++ b/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
  */
 
 .global _stext
-.global _sbss
+.global __bss_start
 .global _start
 
 .global _rambase
@@ -229,8 +229,8 @@
 	cmp.l	#_edata, %a1
 	blt	LD1
 
-	moveal	#_sbss, %a0
-	moveal	#_ebss, %a1
+	moveal	#__bss_start, %a0
+	moveal	#__bss_stop, %a1
 
 	/* Copy 0 to %a0 until %a0 == %a1 */
 L1:
@@ -244,7 +244,7 @@
 store_ram_size:
 	/* Set ram size information */
 	move.l	#_sdata, _rambase
-	move.l	#_ebss, _ramstart
+	move.l	#__bss_stop, _ramstart
 	move.l	#RAMEND, %d0
 	sub.l	#0x1000, %d0			/* Reserve 4K for stack space.*/
 	move.l	%d0, _ramend			/* Different from RAMEND.*/
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 4e0c9eb..b88f571 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@
 	/*
 	 *	Move ROM filesystem above bss :-)
 	 */
-	lea	_sbss,%a0			/* get start of bss */
-	lea	_ebss,%a1			/* set up destination  */
+	lea	__bss_start,%a0			/* get start of bss */
+	lea	__bss_stop,%a1			/* set up destination  */
 	movel	%a0,%a2				/* copy of bss start */
 
 	movel	8(%a0),%d0			/* get size of ROMFS */
@@ -249,7 +249,7 @@
 	bne	_copy_romfs
 
 #else /* CONFIG_ROMFS_FS */
-	lea	_ebss,%a1
+	lea	__bss_stop,%a1
 	movel	%a1,_ramstart
 #endif /* CONFIG_ROMFS_FS */
 
@@ -257,8 +257,8 @@
 	/*
 	 *	Zero out the bss region.
 	 */
-	lea	_sbss,%a0			/* get start of bss */
-	lea	_ebss,%a1			/* get end of bss */
+	lea	__bss_start,%a0			/* get start of bss */
+	lea	__bss_stop,%a1			/* get end of bss */
 	clrl	%d0				/* set value */
 _clear_bss:
 	movel	%d0,(%a0)+			/* clear each word */
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
index d8e6349..eeba067 100644
--- a/arch/m68k/sun3/prom/init.c
+++ b/arch/m68k/sun3/prom/init.c
@@ -22,57 +22,13 @@
 struct linux_nodeops *prom_nodeops;
 
 /* You must call prom_init() before you attempt to use any of the
- * routines in the prom library.  It returns 0 on success, 1 on
- * failure.  It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
  */
 
-extern void prom_meminit(void);
-extern void prom_ranges_init(void);
-
 void __init prom_init(struct linux_romvec *rp)
 {
 	romvec = rp;
-#ifndef CONFIG_SUN3
-	switch(romvec->pv_romvers) {
-	case 0:
-		prom_vers = PROM_V0;
-		break;
-	case 2:
-		prom_vers = PROM_V2;
-		break;
-	case 3:
-		prom_vers = PROM_V3;
-		break;
-	case 4:
-		prom_vers = PROM_P1275;
-		prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
-		prom_halt();
-		break;
-	default:
-		prom_printf("PROMLIB: Bad PROM version %d\n",
-			    romvec->pv_romvers);
-		prom_halt();
-		break;
-	};
-
-	prom_rev = romvec->pv_plugin_revision;
-	prom_prev = romvec->pv_printrev;
-	prom_nodeops = romvec->pv_nodeops;
-
-	prom_root_node = prom_getsibling(0);
-	if((prom_root_node == 0) || (prom_root_node == -1))
-		prom_halt();
-
-	if((((unsigned long) prom_nodeops) == 0) ||
-	   (((unsigned long) prom_nodeops) == -1))
-		prom_halt();
-
-	prom_meminit();
-
-	prom_ranges_init();
-#endif
-//	printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
-//	       romvec->pv_romvers, prom_rev);
 
 	/* Initialization successful. */
 	return;
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 4487e15..c07ed5d 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -18,10 +18,6 @@
 extern unsigned long __ivt_start[], __ivt_end[];
 extern char _etext[], _stext[];
 
-#  ifdef CONFIG_MTD_UCLINUX
-extern char *_ebss;
-#  endif
-
 extern u32 _fdt_start[], _fdt_end[];
 
 # endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bb4907c..2b25bcf 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,9 +21,6 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 
-extern char *_ebss;
-EXPORT_SYMBOL_GPL(_ebss);
-
 #ifdef CONFIG_FUNCTION_TRACER
 extern void _mcount(void);
 EXPORT_SYMBOL(_mcount);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 16d8dfd..4da971d 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@
 
 	/* Move ROMFS out of BSS before clearing it */
 	if (romfs_size > 0) {
-		memmove(&_ebss, (int *)romfs_base, romfs_size);
+		memmove(&__bss_stop, (int *)romfs_base, romfs_size);
 		klimit += romfs_size;
 	}
 #endif
@@ -165,7 +165,7 @@
 	BUG_ON(romfs_size < 0); /* What else can we do? */
 
 	printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
-			romfs_size, romfs_base, (unsigned)&_ebss);
+			romfs_size, romfs_base, (unsigned)&__bss_stop);
 
 	printk("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 109e9d8..936d01a 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,7 +131,6 @@
 			*(COMMON)
 		. = ALIGN (4) ;
 		__bss_stop = . ;
-		_ebss = . ;
 	}
 	. = ALIGN(PAGE_SIZE);
 	_end = .;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 76de6b6..107610e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -124,6 +124,7 @@
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_CLOCKEVENTS
 	select KTIME_SCALAR if 32BIT
+	select HAVE_ARCH_SECCOMP_FILTER
 
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 0fb3402..a60d085 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,13 +4,11 @@
 #ifdef CONFIG_64BIT
 
 #define SECTION_SIZE_BITS	28
-#define MAX_PHYSADDR_BITS	46
 #define MAX_PHYSMEM_BITS	46
 
 #else
 
 #define SECTION_SIZE_BITS	25
-#define MAX_PHYSADDR_BITS	31
 #define MAX_PHYSMEM_BITS	31
 
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fb214dd..fe7b997 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
 #ifndef _ASM_SYSCALL_H
 #define _ASM_SYSCALL_H	1
 
+#include <linux/audit.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/ptrace.h>
@@ -87,4 +88,13 @@
 		regs->orig_gpr2 = args[0];
 }
 
+static inline int syscall_get_arch(struct task_struct *task,
+				   struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(task, TIF_31BIT))
+		return AUDIT_ARCH_S390;
+#endif
+	return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+}
 #endif	/* _ASM_SYSCALL_H */
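
A note on the two s390 seccomp pieces in this series: syscall_get_arch() above is what ends up in the arch field of struct seccomp_data, and the secure_computing() call added to do_syscall_trace_enter() further down is what actually runs the filter. A BPF filter should check that arch value before trusting the syscall number, because the 31-bit compat ABI and the 64-bit ABI are reported differently (AUDIT_ARCH_S390 vs. AUDIT_ARCH_S390X) and a policy written for one should not be applied blindly to the other. The userspace sketch below is purely illustrative: the allow-everything policy and the choice of AUDIT_ARCH_S390X are assumptions made for the example, not part of this patch.

#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38		/* not yet present in older prctl.h copies */
#endif

/* Kill the task unless it is running the ABI this policy was written for. */
static struct sock_filter filter[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
		 offsetof(struct seccomp_data, arch)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_S390X, 1, 0),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
	/* A real policy would go on to inspect seccomp_data.nr here. */
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};

int main(void)
{
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	/* Required so an unprivileged task may install a filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;

	puts("seccomp filter installed");
	return 0;
}
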
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index d122508..f606d93 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -620,7 +620,6 @@
 		return -EFAULT;
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
-	a.addr = (unsigned long) compat_ptr(a.addr);
 	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
 			      a.offset >> PAGE_SHIFT);
 }
@@ -631,7 +630,6 @@
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
-	a.addr = (unsigned long) compat_ptr(a.addr);
 	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 }
 
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index e835d6d..2d82cfc 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1635,7 +1635,7 @@
 	llgfr	%r6,%r6			# unsigned long
 	llgf	%r0,164(%r15)		# unsigned long
 	stg	%r0,160(%r15)
-	jg	sys_process_vm_readv
+	jg	compat_sys_process_vm_readv
 
 ENTRY(compat_sys_process_vm_writev_wrapper)
 	lgfr	%r2,%r2			# compat_pid_t
@@ -1645,4 +1645,4 @@
 	llgfr	%r6,%r6			# unsigned long
 	llgf	%r0,164(%r15)		# unsigned long
 	stg	%r0,160(%r15)
-	jg	sys_process_vm_writev
+	jg	compat_sys_process_vm_writev
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f4eb376..e4be113 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -719,7 +719,11 @@
 	long ret = 0;
 
 	/* Do the secure computing check first. */
-	secure_computing_strict(regs->gprs[2]);
+	if (secure_computing(regs->gprs[2])) {
+		/* seccomp failures shouldn't expose any additional code. */
+		ret = -1;
+		goto out;
+	}
 
 	/*
 	 * The sysc_tracesys code in entry.S stored the system
@@ -745,6 +749,7 @@
 			    regs->gprs[2], regs->orig_gpr2,
 			    regs->gprs[3], regs->gprs[4],
 			    regs->gprs[5]);
+out:
 	return ret ?: regs->gprs[2];
 }
 
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index b4a29ee..d0964d2 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -81,11 +81,12 @@
 {
 	unsigned int ret;
 
-	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
-		personality = PER_LINUX32;
+	if (personality(current->personality) == PER_LINUX32 &&
+	    personality(personality) == PER_LINUX)
+		personality |= PER_LINUX32;
 	ret = sys_personality(personality);
-	if (ret == PER_LINUX32)
-		ret = PER_LINUX;
+	if (personality(ret) == PER_LINUX32)
+		ret &= ~PER_LINUX32;
 
 	return ret;
 }
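
The reason for the change above (and for the identical sparc64 change further down): the personality word carries flag bits such as ADDR_LIMIT_3GB on top of the base personality, so a raw equality test against PER_LINUX32 misses a 31-bit task that has any flag set, and blindly assigning PER_LINUX32/PER_LINUX throws the caller's flags away. A small self-contained sketch of the difference; the constants simply mirror include/linux/personality.h and the sample flag is arbitrary.

#include <stdio.h>

/* Values mirror include/linux/personality.h. */
#define PER_LINUX	0x0000
#define PER_LINUX32	0x0008
#define ADDR_LIMIT_3GB	0x8000000
#define PER_MASK	0x00ff

/* Same masking the kernel's personality() macro performs. */
#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned int cur = PER_LINUX32 | ADDR_LIMIT_3GB;

	/* Old test: misses a PER_LINUX32 task that set extra flags. */
	printf("raw compare:    %d\n", cur == PER_LINUX32);		/* prints 0 */

	/* New test: masks the flags and matches the base personality. */
	printf("masked compare: %d\n", personality(cur) == PER_LINUX32);	/* prints 1 */

	return 0;
}
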
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 4c171f1..b225656 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -335,7 +335,7 @@
 
 	for (n = 0; n < NR_DMAE; n++) {
 		int i = request_irq(get_dma_error_irq(n), dma_err,
-				    IRQF_SHARED, dmae_name[n], NULL);
+				    IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
 		if (unlikely(i < 0)) {
 			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
 			return i;
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 4a53500..1b61997 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -6,7 +6,6 @@
 extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
-extern char _ebss[];
 extern char __start_eh_frame[], __stop_eh_frame[];
 
 #endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 48d1449..2a0ca87 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -183,18 +183,30 @@
 	GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0,
 	GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK,
 	GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE,
-	GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22,
-	GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20,
-	GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18,
-	GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16,
-	GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
-	GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
-	GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
-	GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
-	GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
-	GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
-	GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
-	GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+	GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22,
+	GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20,
+	GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18,
+	GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16,
+	GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14,
+	GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12,
+	GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10,
+	GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8,
+	GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6,
+	GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4,
+	GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2,
+	GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0,
+	GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22,
+	GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20,
+	GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18,
+	GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16,
+	GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14,
+	GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12,
+	GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10,
+	GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8,
+	GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6,
+	GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4,
+	GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2,
+	GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0,
 	GPIO_FN_LCD_M_DISP,
 };
 
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
index f25127c..039e458 100644
--- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
@@ -758,12 +758,22 @@
 	DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
 	LCD_CLK_MARK, LCD_EXTCLK_MARK,
 	LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
-	LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK,
-	LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK,
-	LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
-	LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
-	LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
-	LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+	LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+	LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+	LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+	LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+	LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+	LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+	LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+	LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+	LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+	LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+	LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+	LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+	LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+	LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+	LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+	LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
 	LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
 	LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
 	LCD_M_DISP_MARK,
@@ -1036,6 +1046,7 @@
 
 	PINMUX_DATA(PF1_DATA, PF1MD_000),
 	PINMUX_DATA(BACK_MARK, PF1MD_001),
+	PINMUX_DATA(SSL10_MARK, PF1MD_011),
 	PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
 	PINMUX_DATA(DACK0_MARK, PF1MD_101),
 
@@ -1049,47 +1060,50 @@
 	PINMUX_DATA(PG27_DATA, PG27MD_00),
 	PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
 	PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+	PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
 
 	PINMUX_DATA(PG26_DATA, PG26MD_00),
 	PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+	PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
 
 	PINMUX_DATA(PG25_DATA, PG25MD_00),
 	PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+	PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
 
 	PINMUX_DATA(PG24_DATA, PG24MD_00),
 	PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
 
 	PINMUX_DATA(PG23_DATA, PG23MD_000),
-	PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010),
+	PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
 	PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
 	PINMUX_DATA(TXD5_MARK, PG23MD_100),
 
 	PINMUX_DATA(PG22_DATA, PG22MD_000),
-	PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010),
+	PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
 	PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
 	PINMUX_DATA(RXD5_MARK, PG22MD_100),
 
 	PINMUX_DATA(PG21_DATA, PG21MD_000),
 	PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
-	PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010),
+	PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
 	PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
 	PINMUX_DATA(TXD4_MARK, PG21MD_100),
 
 	PINMUX_DATA(PG20_DATA, PG20MD_000),
 	PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
-	PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010),
+	PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010),
 	PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
 	PINMUX_DATA(RXD4_MARK, PG20MD_100),
 
 	PINMUX_DATA(PG19_DATA, PG19MD_000),
 	PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
-	PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010),
+	PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
 	PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
 	PINMUX_DATA(SCK5_MARK, PG19MD_100),
 
 	PINMUX_DATA(PG18_DATA, PG18MD_000),
 	PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
-	PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010),
+	PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
 	PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
 	PINMUX_DATA(SCK4_MARK, PG18MD_100),
 
@@ -1097,103 +1111,103 @@
 // we're going with 2 bits
 	PINMUX_DATA(PG17_DATA, PG17MD_00),
 	PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
-	PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10),
+	PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
 
 // TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description
 // we're going with 2 bits
 	PINMUX_DATA(PG16_DATA, PG16MD_00),
 	PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
-	PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10),
+	PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
 
 	PINMUX_DATA(PG15_DATA, PG15MD_00),
 	PINMUX_DATA(D31_MARK, PG15MD_01),
-	PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10),
+	PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
 	PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
 
 	PINMUX_DATA(PG14_DATA, PG14MD_00),
 	PINMUX_DATA(D30_MARK, PG14MD_01),
-	PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10),
+	PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
 	PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
 
 	PINMUX_DATA(PG13_DATA, PG13MD_00),
 	PINMUX_DATA(D29_MARK, PG13MD_01),
-	PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10),
+	PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
 	PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
 
 	PINMUX_DATA(PG12_DATA, PG12MD_00),
 	PINMUX_DATA(D28_MARK, PG12MD_01),
-	PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10),
+	PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
 	PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
 
 	PINMUX_DATA(PG11_DATA, PG11MD_000),
 	PINMUX_DATA(D27_MARK, PG11MD_001),
-	PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010),
+	PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
 	PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
 	PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
 
 	PINMUX_DATA(PG10_DATA, PG10MD_000),
 	PINMUX_DATA(D26_MARK, PG10MD_001),
-	PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010),
+	PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
 	PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
 	PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
 
 	PINMUX_DATA(PG9_DATA, PG9MD_000),
 	PINMUX_DATA(D25_MARK, PG9MD_001),
-	PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010),
+	PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
 	PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
 	PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
 
 	PINMUX_DATA(PG8_DATA, PG8MD_000),
 	PINMUX_DATA(D24_MARK, PG8MD_001),
-	PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010),
+	PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
 	PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
 	PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
 
 	PINMUX_DATA(PG7_DATA, PG7MD_000),
 	PINMUX_DATA(D23_MARK, PG7MD_001),
-	PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010),
+	PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
 	PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
 	PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
 
 	PINMUX_DATA(PG6_DATA, PG6MD_000),
 	PINMUX_DATA(D22_MARK, PG6MD_001),
-	PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010),
+	PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
 	PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
 	PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
 
 	PINMUX_DATA(PG5_DATA, PG5MD_000),
 	PINMUX_DATA(D21_MARK, PG5MD_001),
-	PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010),
+	PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
 	PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
 	PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
 
 	PINMUX_DATA(PG4_DATA, PG4MD_000),
 	PINMUX_DATA(D20_MARK, PG4MD_001),
-	PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010),
+	PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
 	PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
 	PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
 
 	PINMUX_DATA(PG3_DATA, PG3MD_000),
 	PINMUX_DATA(D19_MARK, PG3MD_001),
-	PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010),
+	PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
 	PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
 	PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
 
 	PINMUX_DATA(PG2_DATA, PG2MD_000),
 	PINMUX_DATA(D18_MARK, PG2MD_001),
-	PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010),
+	PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
 	PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
 	PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
 
 	PINMUX_DATA(PG1_DATA, PG1MD_000),
 	PINMUX_DATA(D17_MARK, PG1MD_001),
-	PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010),
+	PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
 	PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
 	PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
 
 	PINMUX_DATA(PG0_DATA, PG0MD_000),
 	PINMUX_DATA(D16_MARK, PG0MD_001),
-	PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010),
+	PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
 	PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
 	PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
 
@@ -1275,14 +1289,14 @@
 
 	PINMUX_DATA(PJ23_DATA, PJ23MD_000),
 	PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
-	PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010),
+	PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
 	PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
 	PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
 	PINMUX_DATA(CTX1_MARK, PJ23MD_101),
 
 	PINMUX_DATA(PJ22_DATA, PJ22MD_000),
 	PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
-	PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010),
+	PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
 	PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
 	PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
 	PINMUX_DATA(CRX1_MARK, PJ22MD_101),
@@ -1290,14 +1304,14 @@
 
 	PINMUX_DATA(PJ21_DATA, PJ21MD_000),
 	PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
-	PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010),
+	PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
 	PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
 	PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
 	PINMUX_DATA(CTX2_MARK, PJ21MD_101),
 
 	PINMUX_DATA(PJ20_DATA, PJ20MD_000),
 	PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
-	PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010),
+	PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
 	PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
 	PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
 	PINMUX_DATA(CRX2_MARK, PJ20MD_101),
@@ -1305,7 +1319,7 @@
 
 	PINMUX_DATA(PJ19_DATA, PJ19MD_000),
 	PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
-	PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010),
+	PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
 	PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
 	PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
 	PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
@@ -1313,126 +1327,126 @@
 
 	PINMUX_DATA(PJ18_DATA, PJ18MD_000),
 	PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
-	PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010),
+	PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
 	PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
 	PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
 	PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
 
 	PINMUX_DATA(PJ17_DATA, PJ17MD_000),
 	PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
-	PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010),
+	PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
 	PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
 	PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
 	PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
 
 	PINMUX_DATA(PJ16_DATA, PJ16MD_000),
 	PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
-	PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010),
+	PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
 	PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
 	PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
 	PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
 
 	PINMUX_DATA(PJ15_DATA, PJ15MD_000),
 	PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
-	PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010),
+	PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
 	PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
 	PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
 	PINMUX_DATA(TXD7_MARK, PJ15MD_101),
 
 	PINMUX_DATA(PJ14_DATA, PJ14MD_000),
 	PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
-	PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010),
+	PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
 	PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
 	PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
 	PINMUX_DATA(TXD6_MARK, PJ14MD_101),
 
 	PINMUX_DATA(PJ13_DATA, PJ13MD_000),
 	PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
-	PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010),
+	PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
 	PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
 	PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
 	PINMUX_DATA(TXD5_MARK, PJ13MD_101),
 
 	PINMUX_DATA(PJ12_DATA, PJ12MD_000),
 	PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
-	PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010),
+	PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
 	PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
 	PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
 	PINMUX_DATA(SCK7_MARK, PJ12MD_101),
 
 	PINMUX_DATA(PJ11_DATA, PJ11MD_000),
 	PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
-	PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010),
+	PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
 	PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
 	PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
 	PINMUX_DATA(SCK6_MARK, PJ11MD_101),
 
 	PINMUX_DATA(PJ10_DATA, PJ10MD_000),
 	PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
-	PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010),
+	PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
 	PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
 	PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
 	PINMUX_DATA(SCK5_MARK, PJ10MD_101),
 
 	PINMUX_DATA(PJ9_DATA, PJ9MD_000),
 	PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
-	PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010),
+	PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
 	PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
 	PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
 	PINMUX_DATA(RTS5_MARK, PJ9MD_101),
 
 	PINMUX_DATA(PJ8_DATA, PJ8MD_000),
 	PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
-	PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010),
+	PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
 	PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
 	PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
 	PINMUX_DATA(CTS5_MARK, PJ8MD_101),
 
 	PINMUX_DATA(PJ7_DATA, PJ7MD_000),
 	PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
-	PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010),
+	PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
 	PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
 	PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
 
 	PINMUX_DATA(PJ6_DATA, PJ6MD_000),
 	PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
-	PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010),
+	PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
 	PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
 	PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
 
 	PINMUX_DATA(PJ5_DATA, PJ5MD_000),
 	PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
-	PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010),
+	PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
 	PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
 	PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
 
 	PINMUX_DATA(PJ4_DATA, PJ4MD_000),
 	PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
-	PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010),
+	PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
 	PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
 	PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
 
 	PINMUX_DATA(PJ3_DATA, PJ3MD_000),
 	PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
-	PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010),
+	PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
 	PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
 	PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
 
 	PINMUX_DATA(PJ2_DATA, PJ2MD_000),
 	PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
-	PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010),
+	PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
 	PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
 	PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
 
 	PINMUX_DATA(PJ1_DATA, PJ1MD_000),
 	PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
-	PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010),
+	PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
 	PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
 	PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
 
 	PINMUX_DATA(PJ0_DATA, PJ0MD_000),
 	PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
-	PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010),
+	PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
 	PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
 	PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
 };
@@ -1877,30 +1891,55 @@
 	PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
 	PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
 
-	PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
-	PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+	PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+	PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
 
 	PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
 };
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7b57bf1..ebe7a7d 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@
 	data_resource.start = virt_to_phys(_etext);
 	data_resource.end = virt_to_phys(_edata)-1;
 	bss_resource.start = virt_to_phys(__bss_start);
-	bss_resource.end = virt_to_phys(_ebss)-1;
+	bss_resource.end = virt_to_phys(__bss_stop)-1;
 
 #ifdef CONFIG_CMDLINE_OVERWRITE
 	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 3896f26..2a0a596 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -19,7 +19,6 @@
 EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
 EXPORT_SYMBOL(empty_zero_page);
 
 #define DECLARE_EXPORT(name)		\
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c98905f..db88cbf 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -78,7 +78,6 @@
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	BSS_SECTION(0, PAGE_SIZE, 4)
-	_ebss = .;			/* uClinux MTD sucks */
 	_end = . ;
 
 	STABS_DEBUG
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 84a5776..60164e6 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
  *
  * Make sure the stack pointer contains a valid address. Valid
  * addresses for kernel stacks are anywhere after the bss
- * (after _ebss) and anywhere in init_thread_union (init_stack).
+ * (after __bss_stop) and anywhere in init_thread_union (init_stack).
  */
 #define STACK_CHECK()					\
 	mov	#(THREAD_SIZE >> 10), r0;		\
@@ -60,7 +60,7 @@
 	cmp/hi	r2, r1;					\
 	bf	stack_panic;				\
 							\
-	/* If sp > _ebss then we're OK. */		\
+	/* If sp > __bss_stop then we're OK. */		\
 	mov.l	.L_ebss, r1;				\
 	cmp/hi	r1, r15;				\
 	bt	1f;					\
@@ -70,7 +70,7 @@
 	cmp/hs	r1, r15;				\
 	bf	stack_panic;				\
 							\
-	/* If sp > init_stack && sp < _ebss, not OK. */	\
+	/* If sp > init_stack && sp < __bss_stop, not OK. */	\
 	add	r0, r1;					\
 	cmp/hs	r1, r15;				\
 	bt	stack_panic;				\
@@ -292,8 +292,6 @@
 	 nop
 
 	.align 2
-.L_ebss:
-	.long	_ebss
 .L_init_thread_union:
 	.long	init_thread_union
 .Lpanic:
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 0dc1f57..11c6c96 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -502,12 +502,12 @@
 {
 	int ret;
 
-	if (current->personality == PER_LINUX32 &&
-	    personality == PER_LINUX)
-		personality = PER_LINUX32;
+	if (personality(current->personality) == PER_LINUX32 &&
+	    personality(personality) == PER_LINUX)
+		personality |= PER_LINUX32;
 	ret = sys_personality(personality);
-	if (ret == PER_LINUX32)
-		ret = PER_LINUX;
+	if (personality(ret) == PER_LINUX32)
+		ret &= ~PER_LINUX32;
 
 	return ret;
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6026fdd..d58edf5 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2020,6 +2020,9 @@
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 unsigned long vmemmap_table[VMEMMAP_SIZE];
 
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long vstart = (unsigned long) start;
@@ -2050,15 +2053,30 @@
 
 			*vmem_pp = pte_base | __pa(block);
 
-			printk(KERN_INFO "[%p-%p] page_structs=%lu "
-			       "node=%d entry=%lu/%lu\n", start, block, nr,
-			       node,
-			       addr >> VMEMMAP_CHUNK_SHIFT,
-			       VMEMMAP_SIZE);
+			/* check to see if we have contiguous blocks */
+			if (addr_end != addr || node_start != node) {
+				if (addr_start)
+					printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+					       addr_start, addr_end-1, node_start);
+				addr_start = addr;
+				node_start = node;
+			}
+			addr_end = addr + VMEMMAP_CHUNK;
 		}
 	}
 	return 0;
 }
+
+void __meminit vmemmap_populate_print_last(void)
+{
+	if (addr_start) {
+		printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+		       addr_start, addr_end-1, node_start);
+		addr_start = 0;
+		addr_end = 0;
+		node_start = 0;
+	}
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void prot_init_common(unsigned long page_none,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba2657c..8ec3a1a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1527,7 +1527,7 @@
 	  If unsure, say Y. Only embedded should say N here.
 
 config CC_STACKPROTECTOR
-	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+	bool "Enable -fstack-protector buffer overflow detection"
 	---help---
 	  This option turns on the -fstack-protector GCC feature. This
 	  feature puts, at the beginning of functions, a canary value on
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b0c5276..682e9c2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,10 @@
 
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
 
+        # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+        # with nonstandard options
+        KBUILD_CFLAGS += -fno-pic
+
         # prevent gcc from keeping the stack 16 byte aligned
         KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
 
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5a747dd..f7535be 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,7 +57,7 @@
 		   -Wall -Wstrict-prototypes \
 		   -march=i386 -mregparm=3 \
 		   -include $(srctree)/$(src)/code16gcc.h \
-		   -fno-strict-aliasing -fomit-frame-pointer \
+		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
 		   $(call cc-option, -ffreestanding) \
 		   $(call cc-option, -fno-toplevel-reorder,\
 			$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 441520e..a3ac52b 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -33,6 +33,14 @@
 #define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
 #define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */
 #define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */
+#define MCACOD		  0xffff     /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK	0xfff0
+#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
+#define MCACOD_DATA	0x0134	/* Data Load */
+#define MCACOD_INSTR	0x0150	/* Instruction Fetch */
 
 /* MCi_MISC register defines */
 #define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)
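
Moving the MCACOD_* masks into <asm/mce.h> makes the architectural error-code field of MCi_STATUS decodable outside mce-severity.c; the Sandy Bridge IFU quirk added to mce.c later in this diff relies on exactly that. A minimal sketch of the decoding, using only the masks defined above; the sample status value is made up for illustration.

#include <stdio.h>

/* Same masks as the MCACOD* definitions added to <asm/mce.h> above. */
#define MCACOD		0xffff	/* MCA Error Code field of MCi_STATUS */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK	0xfff0
#define MCACOD_INSTR	0x0150	/* Instruction Fetch */

int main(void)
{
	/* Hypothetical MCi_STATUS value; only the low 16 bits matter here. */
	unsigned long long status = 0xbd00000000000150ULL;
	unsigned int code = status & MCACOD;

	if ((code & MCACOD_SCRUBMSK) == MCACOD_SCRUB)
		printf("memory scrubbing error (code 0x%x)\n", code);
	else if (code == MCACOD_INSTR)
		printf("instruction fetch error (code 0x%x)\n", code);
	else
		printf("other error code 0x%x\n", code);

	return 0;
}
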
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dab3935..cb4e43b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,11 +196,16 @@
 extern void perf_events_lapic_init(void);
 
 /*
- * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
- * This flag is otherwise unused and ABI specified to be 0, so nobody should
- * care what we do with it.
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ *         event (HW bugs exempt).
+ * VM    - original X86_VM_MASK; see set_linear_ip().
  */
 #define PERF_EFLAGS_EXACT	(1UL << 3)
+#define PERF_EFLAGS_VM		(1UL << 5)
 
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 95bf99d..1b8e5a0 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -25,10 +25,6 @@
 static char temp_stack[4096];
 #endif
 
-asmlinkage void acpi_enter_s3(void)
-{
-	acpi_enter_sleep_state(3, wake_sleep_flags);
-}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 5653a57..67f59f8 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,7 +2,6 @@
  *	Variables and functions used by the code in sleep.c
  */
 
-#include <linux/linkage.h>
 #include <asm/realmode.h>
 
 extern unsigned long saved_video_mode;
@@ -11,7 +10,6 @@
 extern int wakeup_pmode_return;
 
 extern u8 wake_sleep_flags;
-extern asmlinkage void acpi_enter_s3(void);
 
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 7261083..13ab7205 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,7 +74,9 @@
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	call	acpi_enter_s3
+	pushl	$3
+	call	acpi_enter_sleep_state
+	addl	$4, %esp
 
 #	In case of S3 failure, we'll emerge here.  Jump
 # 	to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 014d1d2..8ea5164 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,7 +71,9 @@
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	call	acpi_enter_s3
+	movl	$3, %edi
+	xorl	%eax, %eax
+	call	acpi_enter_sleep_state
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 931280f..afb7ff7 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -224,7 +224,7 @@
 			ideal_nops = intel_nops;
 #endif
 		}
-
+		break;
 	default:
 #ifdef CONFIG_X86_64
 		ideal_nops = k8_nops;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 406eee7..c265593 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	for_each_cpu(cpu, cfg->domain)
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
@@ -1212,7 +1212,7 @@
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	for_each_cpu(cpu, cfg->old_domain) {
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 								vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1356,6 +1356,16 @@
 	if (!IO_APIC_IRQ(irq))
 		return;
 
+	/*
+	 * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
+	 * can handle this irq and the apic driver is finalized at this point,
+	 * update the cfg->domain.
+	 */
+	if (irq < legacy_pic->nr_legacy_irqs &&
+	    cpumask_equal(cfg->domain, cpumask_of(0)))
+		apic->vector_allocation_domain(0, cfg->domain,
+					       apic->target_cpus());
+
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 46d8786..a5fbc3c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -144,6 +144,8 @@
 {
 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+	setup_clear_cpu_cap(X86_FEATURE_AVX);
+	setup_clear_cpu_cap(X86_FEATURE_AVX2);
 	return 1;
 }
 __setup("noxsave", x86_xsave_setup);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 413c2ce..1301762 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,13 +55,6 @@
 #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
 #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
 #define	MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
-#define MCACOD 0xffff
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK	0xfff0
-#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
-#define MCACOD_DATA	0x0134	/* Data Load */
-#define MCACOD_INSTR	0x0150	/* Instruction Fetch */
 
 	MCESEV(
 		NO, "Invalid",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5e095f8..292d025 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,6 +103,8 @@
 
 static DEFINE_PER_CPU(struct work_struct, mce_work);
 
+static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -650,14 +652,18 @@
  * Do a quick check if any of the events requires a panic.
  * This decides if we keep the events around or clear them.
  */
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+			  struct pt_regs *regs)
 {
 	int i, ret = 0;
 
 	for (i = 0; i < banks; i++) {
 		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
-		if (m->status & MCI_STATUS_VAL)
+		if (m->status & MCI_STATUS_VAL) {
 			__set_bit(i, validp);
+			if (quirk_no_way_out)
+				quirk_no_way_out(i, m, regs);
+		}
 		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
 			ret = 1;
 	}
@@ -1040,7 +1046,7 @@
 	*final = m;
 
 	memset(valid_banks, 0, sizeof(valid_banks));
-	no_way_out = mce_no_way_out(&m, &msg, valid_banks);
+	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
 
 	barrier();
 
@@ -1418,6 +1424,34 @@
 	}
 }
 
+/*
+ * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
+ * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
+ * Vol 3B Table 15-20). But this confuses both the code that determines
+ * whether the machine check occurred in kernel or user mode, and also
+ * the severity assessment code. Pretend that EIPV was set, and take the
+ * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
+ */
+static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+	if (bank != 0)
+		return;
+	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+		return;
+	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
+		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
+			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
+			  MCACOD)) !=
+			 (MCI_STATUS_UC|MCI_STATUS_EN|
+			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
+			  MCI_STATUS_AR|MCACOD_INSTR))
+		return;
+
+	m->mcgstatus |= MCG_STATUS_EIPV;
+	m->ip = regs->ip;
+	m->cs = regs->cs;
+}
+
 /* Add per CPU specific workarounds here */
 static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 {
@@ -1515,6 +1549,9 @@
 		 */
 		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
 			mce_bootlog = 0;
+
+		if (c->x86 == 6 && c->x86_model == 45)
+			quirk_no_way_out = quirk_sandybridge_ifu;
 	}
 	if (monarch_timeout < 0)
 		monarch_timeout = 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 29557aa..915b876 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
 #include <asm/smp.h>
 #include <asm/alternative.h>
 #include <asm/timer.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
 
 #include "perf_event.h"
 
@@ -1738,6 +1740,29 @@
 	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
+static unsigned long get_segment_base(unsigned int segment)
+{
+	struct desc_struct *desc;
+	int idx = segment >> 3;
+
+	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		if (idx > LDT_ENTRIES)
+			return 0;
+
+		if (idx > current->active_mm->context.size)
+			return 0;
+
+		desc = current->active_mm->context.ldt;
+	} else {
+		if (idx > GDT_ENTRIES)
+			return 0;
+
+		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+	}
+
+	return get_desc_base(desc + idx);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1746,13 +1771,17 @@
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
 	/* 32-bit process in 64-bit kernel. */
+	unsigned long ss_base, cs_base;
 	struct stack_frame_ia32 frame;
 	const void __user *fp;
 
 	if (!test_thread_flag(TIF_IA32))
 		return 0;
 
-	fp = compat_ptr(regs->bp);
+	cs_base = get_segment_base(regs->cs);
+	ss_base = get_segment_base(regs->ss);
+
+	fp = compat_ptr(ss_base + regs->bp);
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
@@ -1765,8 +1794,8 @@
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		perf_callchain_store(entry, frame.return_address);
-		fp = compat_ptr(frame.next_frame);
+		perf_callchain_store(entry, cs_base + frame.return_address);
+		fp = compat_ptr(ss_base + frame.next_frame);
 	}
 	return 1;
 }
@@ -1789,6 +1818,12 @@
 		return;
 	}
 
+	/*
+	 * We don't know what to do with VM86 stacks, so ignore them for now.
+	 */
+	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+		return;
+
 	fp = (void __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@
 	}
 }
 
+/*
+ * Deal with code segment offsets for the various execution modes:
+ *
+ *   VM86 - the good olde 16 bit days, where the linear address is
+ *          20 bits and we use regs->ip + 0x10 * regs->cs.
+ *
+ *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
+ *          to figure out what the 32bit base address is.
+ *
+ *    X32 - has TIF_X32 set, but is running in x86_64
+ *
+ * X86_64 - CS,DS,SS,ES are all zero based.
+ */
+static unsigned long code_segment_base(struct pt_regs *regs)
+{
+	/*
+	 * If we are in VM86 mode, add the segment offset to convert to a
+	 * linear address.
+	 */
+	if (regs->flags & X86_VM_MASK)
+		return 0x10 * regs->cs;
+
+	/*
+	 * For IA32 we look at the GDT/LDT segment base to convert the
+	 * effective IP to a linear address.
+	 */
+#ifdef CONFIG_X86_32
+	if (user_mode(regs) && regs->cs != __USER_CS)
+		return get_segment_base(regs->cs);
+#else
+	if (test_thread_flag(TIF_IA32)) {
+		if (user_mode(regs) && regs->cs != __USER32_CS)
+			return get_segment_base(regs->cs);
+	}
+#endif
+	return 0;
+}
+
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		ip = perf_guest_cbs->get_guest_ip();
-	else
-		ip = instruction_pointer(regs);
+		return perf_guest_cbs->get_guest_ip();
 
-	return ip;
+	return regs->ip + code_segment_base(regs);
 }
 
 unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@
 		else
 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
 	} else {
-		if (!kernel_ip(regs->ip))
+		if (user_mode(regs))
 			misc |= PERF_RECORD_MISC_USER;
 		else
 			misc |= PERF_RECORD_MISC_KERNEL;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 821d53b..6605a81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,6 +516,26 @@
 #endif
 }
 
+/*
+ * Not all PMUs provide the right context information to place the reported IP
+ * into full context. Specifically, segment registers are typically not
+ * supplied.
+ *
+ * Assuming the address is a linear address (it is for IBS), we fake the CS and
+ * vm86 mode using the known zero-based code segment and 'fix up' the registers
+ * to reflect this.
+ *
+ * Intel PEBS/LBR appear to typically provide the effective address; there is
+ * not much we can do about that but pray and treat it like a linear address.
+ */
+static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+{
+	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+	if (regs->flags & X86_VM_MASK)
+		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+	regs->ip = ip;
+}
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
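
A note on the xor in set_linear_ip() above: when the sampled flags have X86_VM_MASK set, the single ^= against (PERF_EFLAGS_VM | X86_VM_MASK) clears the real EFLAGS.VM bit and records the fact in the otherwise-unused PERF_EFLAGS_VM bit instead, which the callchain code in perf_event.c then checks. A stand-alone illustration of that swap; the X86_VM_MASK value mirrors the kernel's EFLAGS.VM definition and PERF_EFLAGS_VM comes from this patch.

#include <stdio.h>

/* Bit values mirror the kernel's definitions. */
#define X86_VM_MASK	0x00020000UL	/* EFLAGS.VM, bit 17 */
#define PERF_EFLAGS_VM	(1UL << 5)	/* otherwise-unused eflags bit */

int main(void)
{
	unsigned long flags = X86_VM_MASK;	/* sample taken in vm86 mode */

	/* The same conditional swap set_linear_ip() performs. */
	if (flags & X86_VM_MASK)
		flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);

	printf("X86_VM_MASK cleared: %d, PERF_EFLAGS_VM set: %d\n",
	       !(flags & X86_VM_MASK), !!(flags & PERF_EFLAGS_VM));
	return 0;
}
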
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index da9bcdc..7bfb5be 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
 
 #include <asm/apic.h>
 
+#include "perf_event.h"
+
 static u32 ibs_caps;
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -536,7 +538,7 @@
 	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
 		regs.flags &= ~PERF_EFLAGS_EXACT;
 	} else {
-		instruction_pointer_set(&regs, ibs_data.regs[1]);
+		set_linear_ip(&regs, ibs_data.regs[1]);
 		regs.flags |= PERF_EFLAGS_EXACT;
 	}
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3823669..7f2739e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1522,8 +1522,16 @@
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
 	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
 	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+	/*
+	 * If a PMU counter has PEBS enabled, it is not enough to disable the
+	 * counter on a guest entry, since a PEBS memory write can overshoot
+	 * the guest entry and corrupt guest memory. Disabling PEBS solves this.
+	 */
+	arr[1].msr = MSR_IA32_PEBS_ENABLE;
+	arr[1].host = cpuc->pebs_enabled;
+	arr[1].guest = 0;
 
-	*nr = 1;
+	*nr = 2;
 	return arr;
 }
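
The extra arr[1] entry above grows the guest/host MSR switch list so PEBS is forced off while a guest runs. Each entry names one MSR plus the value to load on VM-entry and the value to restore on VM-exit; a minimal sketch of that shape (the struct mirrors perf_guest_switch_msr, the values are illustrative):

#include <stdio.h>

/* Same shape as perf_guest_switch_msr: one MSR plus its host/guest values. */
struct switch_msr {
	unsigned int msr;
	unsigned long long host;	/* value restored on VM-exit */
	unsigned long long guest;	/* value loaded on VM-entry */
};

int main(void)
{
	/* Illustrative values: PEBS (0x3f1) forced to 0 while the guest runs. */
	struct switch_msr arr[] = {
		{ 0x38f /* GLOBAL_CTRL */, 0xffull, 0x0full },
		{ 0x3f1 /* PEBS_ENABLE */, 0x0full, 0x00ull },
	};
	int i;

	for (i = 0; i < 2; i++)
		printf("msr %#x: host=%#llx guest=%#llx\n",
		       arr[i].msr, arr[i].host, arr[i].guest);
	return 0;
}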
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 629ae0b..e38d97b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@
 	 * We sampled a branch insn, rewind using the LBR stack
 	 */
 	if (ip == to) {
-		regs->ip = from;
+		set_linear_ip(regs, from);
 		return 1;
 	}
 
@@ -529,7 +529,7 @@
 	} while (to < ip);
 
 	if (to == ip) {
-		regs->ip = old_to;
+		set_linear_ip(regs, old_to);
 		return 1;
 	}
 
@@ -569,7 +569,8 @@
 	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
 	 */
 	regs = *iregs;
-	regs.ip = pebs->ip;
+	regs.flags = pebs->flags;
+	set_linear_ip(&regs, pebs->ip);
 	regs.bp = pebs->bp;
 	regs.sp = pebs->sp;
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 7563fda..0a55710 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -796,7 +796,6 @@
 
 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
-DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
 
@@ -902,16 +901,21 @@
 	.attrs = nhmex_uncore_cbox_formats_attr,
 };
 
+/* msr offset for each instance of cbox */
+static unsigned nhmex_cbox_msr_offsets[] = {
+	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
+};
+
 static struct intel_uncore_type nhmex_uncore_cbox = {
 	.name			= "cbox",
 	.num_counters		= 6,
-	.num_boxes		= 8,
+	.num_boxes		= 10,
 	.perf_ctr_bits		= 48,
 	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
 	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
 	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
 	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
-	.msr_offset		= NHMEX_C_MSR_OFFSET,
+	.msr_offsets		= nhmex_cbox_msr_offsets,
 	.pair_ctr_ctl		= 1,
 	.ops			= &nhmex_uncore_ops,
 	.format_group		= &nhmex_uncore_cbox_format_group
@@ -1032,24 +1036,22 @@
 
 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 {
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
 
-	if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
-		reg1->config = event->attr.config1;
-		reg2->config = event->attr.config2;
-	} else {
-		reg1->config = ~0ULL;
-		reg2->config = ~0ULL;
-	}
+	/* only TO_R_PROG_EV event uses the match/mask register */
+	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
+	    NHMEX_S_EVENT_TO_R_PROG_EV)
+		return 0;
 
 	if (box->pmu->pmu_idx == 0)
 		reg1->reg = NHMEX_S0_MSR_MM_CFG;
 	else
 		reg1->reg = NHMEX_S1_MSR_MM_CFG;
-
 	reg1->idx = 0;
-
+	reg1->config = event->attr.config1;
+	reg2->config = event->attr.config2;
 	return 0;
 }
 
@@ -1059,8 +1061,8 @@
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
 
-	wrmsrl(reg1->reg, 0);
-	if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
+	if (reg1->idx != EXTRA_REG_NONE) {
+		wrmsrl(reg1->reg, 0);
 		wrmsrl(reg1->reg + 1, reg1->config);
 		wrmsrl(reg1->reg + 2, reg2->config);
 		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
@@ -1074,7 +1076,6 @@
 	&format_attr_edge.attr,
 	&format_attr_inv.attr,
 	&format_attr_thresh8.attr,
-	&format_attr_mm_cfg.attr,
 	&format_attr_match.attr,
 	&format_attr_mask.attr,
 	NULL,
@@ -1142,6 +1143,9 @@
 	EVENT_EXTRA_END
 };
 
+/* Nehalem-EX or Westmere-EX ? */
+bool uncore_nhmex;
+
 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
 {
 	struct intel_uncore_extra_reg *er;
@@ -1171,18 +1175,29 @@
 		return false;
 
 	/* mask of the shared fields */
-	mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+	if (uncore_nhmex)
+		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+	else
+		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
 	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
 
 	raw_spin_lock_irqsave(&er->lock, flags);
 	/* add mask of the non-shared field if it's in use */
-	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
-		mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
+		if (uncore_nhmex)
+			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+		else
+			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+	}
 
 	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
 		atomic_add(1 << (idx * 8), &er->ref);
-		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
-			NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+		if (uncore_nhmex)
+			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+		else
+			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
+				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
 		er->config &= ~mask;
 		er->config |= (config & mask);
 		ret = true;
@@ -1216,7 +1231,10 @@
 
 	/* get the non-shared control bits and shift them */
 	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
-	config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+	if (uncore_nhmex)
+		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+	else
+		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
 	if (new_idx > orig_idx) {
 		idx = new_idx - orig_idx;
 		config <<= 3 * idx;
@@ -1226,6 +1244,10 @@
 	}
 
 	/* add the shared control bits back */
+	if (uncore_nhmex)
+		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+	else
+		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
 	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
 	if (modify) {
 		/* adjust the main event selector */
@@ -1264,7 +1286,8 @@
 	}
 
 	/* for the match/mask registers */
-	if ((uncore_box_is_fake(box) || !reg2->alloc) &&
+	if (reg2->idx != EXTRA_REG_NONE &&
+	    (uncore_box_is_fake(box) || !reg2->alloc) &&
 	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
 		goto fail;
 
@@ -1278,7 +1301,8 @@
 		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
 			nhmex_mbox_alter_er(event, idx[0], true);
 		reg1->alloc |= alloc;
-		reg2->alloc = 1;
+		if (reg2->idx != EXTRA_REG_NONE)
+			reg2->alloc = 1;
 	}
 	return NULL;
 fail:
@@ -1342,9 +1366,6 @@
 	struct extra_reg *er;
 	unsigned msr;
 	int reg_idx = 0;
-
-	if (WARN_ON_ONCE(reg1->idx != -1))
-		return -EINVAL;
 	/*
 	 * The mbox events may require 2 extra MSRs at the most. But only
 	 * the lower 32 bits in these MSRs are significant, so we can use
@@ -1355,11 +1376,6 @@
 			continue;
 		if (event->attr.config1 & ~er->valid_mask)
 			return -EINVAL;
-		if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
-		    er->idx == __BITS_VALUE(reg1->idx, 1, 8))
-			continue;
-		if (WARN_ON_ONCE(reg_idx >= 2))
-			return -EINVAL;
 
 		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
 		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
@@ -1368,6 +1384,8 @@
 		/* always use the 32~63 bits to pass the PLD config */
 		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
 			reg_idx = 1;
+		else if (WARN_ON_ONCE(reg_idx > 0))
+			return -EINVAL;
 
 		reg1->idx &= ~(0xff << (reg_idx * 8));
 		reg1->reg &= ~(0xffff << (reg_idx * 16));
@@ -1376,17 +1394,21 @@
 		reg1->config = event->attr.config1;
 		reg_idx++;
 	}
-	/* use config2 to pass the filter config */
-	reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
-	if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
-		reg2->config = event->attr.config2;
-	else
-		reg2->config = ~0ULL;
-	if (box->pmu->pmu_idx == 0)
-		reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
-	else
-		reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
-
+	/*
+	 * The mbox only provides ability to perform address matching
+	 * The mbox only provides the ability to perform address matching
+	 */
+	if (reg_idx == 2) {
+		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+			reg2->config = event->attr.config2;
+		else
+			reg2->config = ~0ULL;
+		if (box->pmu->pmu_idx == 0)
+			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+		else
+			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+	}
 	return 0;
 }
 
@@ -1422,34 +1444,36 @@
 		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
 			nhmex_mbox_shared_reg_config(box, idx));
 
-	wrmsrl(reg2->reg, 0);
-	if (reg2->config != ~0ULL) {
-		wrmsrl(reg2->reg + 1,
-			reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
-		wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
-			(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
-		wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+	if (reg2->idx != EXTRA_REG_NONE) {
+		wrmsrl(reg2->reg, 0);
+		if (reg2->config != ~0ULL) {
+			wrmsrl(reg2->reg + 1,
+				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+		}
 	}
 
 	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
 }
 
-DEFINE_UNCORE_FORMAT_ATTR(count_mode,	count_mode,	"config:2-3");
-DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode,	"config:4-5");
-DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,	wrap_mode,	"config:6");
-DEFINE_UNCORE_FORMAT_ATTR(flag_mode,	flag_mode,	"config:7");
-DEFINE_UNCORE_FORMAT_ATTR(inc_sel,	inc_sel,	"config:9-13");
-DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,	set_flag_sel,	"config:19-21");
-DEFINE_UNCORE_FORMAT_ATTR(filter_cfg,	filter_cfg,	"config2:63");
-DEFINE_UNCORE_FORMAT_ATTR(filter_match,	filter_match,	"config2:0-33");
-DEFINE_UNCORE_FORMAT_ATTR(filter_mask,	filter_mask,	"config2:34-61");
-DEFINE_UNCORE_FORMAT_ATTR(dsp,		dsp,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(thr,		thr,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(fvc,		fvc,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pgt,		pgt,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(map,		map,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(iss,		iss,		"config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pld,		pld,		"config1:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");
 
 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
 	&format_attr_count_mode.attr,
@@ -1458,7 +1482,7 @@
 	&format_attr_flag_mode.attr,
 	&format_attr_inc_sel.attr,
 	&format_attr_set_flag_sel.attr,
-	&format_attr_filter_cfg.attr,
+	&format_attr_filter_cfg_en.attr,
 	&format_attr_filter_match.attr,
 	&format_attr_filter_mask.attr,
 	&format_attr_dsp.attr,
@@ -1482,6 +1506,12 @@
 	{ /* end: all zeroes */ },
 };
 
+static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
+	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
+	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
+	{ /* end: all zeroes */ },
+};
+
 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
 	NHMEX_UNCORE_OPS_COMMON_INIT(),
 	.enable_event	= nhmex_mbox_msr_enable_event,
@@ -1513,7 +1543,7 @@
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 	int port;
 
-	/* adjust the main event selector */
+	/* adjust the main event selector and extra register index */
 	if (reg1->idx % 2) {
 		reg1->idx--;
 		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1522,29 +1552,17 @@
 		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
 	}
 
-	/* adjust address or config of extra register */
+	/* adjust extra register config */
 	port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
 	switch (reg1->idx % 6) {
-	case 0:
-		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
-		break;
-	case 1:
-		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
-		break;
 	case 2:
-		/* the 8~15 bits to the 0~7 bits */
+		/* shift the 8~15 bits to the 0~7 bits */
 		reg1->config >>= 8;
 		break;
 	case 3:
-		/* the 0~7 bits to the 8~15 bits */
+		/* shift the 0~7 bits to the 8~15 bits */
 		reg1->config <<= 8;
 		break;
-	case 4:
-		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
-		break;
-	case 5:
-		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
-		break;
 	};
 }
 
@@ -1671,7 +1689,7 @@
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-	int port, idx;
+	int idx;
 
 	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
 		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1681,27 +1699,11 @@
 	reg1->idx = idx;
 	reg1->config = event->attr.config1;
 
-	port = idx / 6 + box->pmu->pmu_idx * 4;
-	idx %= 6;
-	switch (idx) {
-	case 0:
-		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
-		break;
-	case 1:
-		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
-		break;
-	case 2:
-	case 3:
-		reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
-		break;
+	switch (idx % 6) {
 	case 4:
 	case 5:
-		if (idx == 4)
-			reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
-		else
-			reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
-		reg2->config = event->attr.config2;
 		hwc->config |= event->attr.config & (~0ULL << 32);
+		reg2->config = event->attr.config2;
 		break;
 	};
 	return 0;
@@ -1727,28 +1729,34 @@
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-	int idx, er_idx;
+	int idx, port;
 
-	idx = reg1->idx % 6;
-	er_idx = idx;
-	if (er_idx > 2)
-		er_idx--;
-	er_idx += (reg1->idx / 6) * 5;
+	idx = reg1->idx;
+	port = idx / 6 + box->pmu->pmu_idx * 4;
 
-	switch (idx) {
+	switch (idx % 6) {
 	case 0:
+		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+		break;
 	case 1:
-		wrmsrl(reg1->reg, reg1->config);
+		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
 		break;
 	case 2:
 	case 3:
-		wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
+		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+			nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
 		break;
 	case 4:
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+			hwc->config >> 32);
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+		break;
 	case 5:
-		wrmsrl(reg1->reg, reg1->config);
-		wrmsrl(reg1->reg + 1, hwc->config >> 32);
-		wrmsrl(reg1->reg + 2, reg2->config);
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+			hwc->config >> 32);
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
 		break;
 	};
 
@@ -1756,8 +1764,8 @@
 		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
 }
 
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
@@ -2303,6 +2311,7 @@
 	event->hw.idx = -1;
 	event->hw.last_tag = ~0ULL;
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
+	event->hw.branch_reg.idx = EXTRA_REG_NONE;
 
 	if (event->attr.config == UNCORE_FIXED_EVENT) {
 		/* no fixed counter */
@@ -2373,7 +2382,7 @@
 	type->attr_groups[1] = NULL;
 }
 
-static void uncore_types_exit(struct intel_uncore_type **types)
+static void __init uncore_types_exit(struct intel_uncore_type **types)
 {
 	int i;
 	for (i = 0; types[i]; i++)
@@ -2814,7 +2823,13 @@
 			snbep_uncore_cbox.num_boxes = max_cores;
 		msr_uncores = snbep_msr_uncores;
 		break;
-	case 46:
+	case 46: /* Nehalem-EX */
+		uncore_nhmex = true;
+	case 47: /* Westmere-EX aka. Xeon E7 */
+		if (!uncore_nhmex)
+			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+		if (nhmex_uncore_cbox.num_boxes > max_cores)
+			nhmex_uncore_cbox.num_boxes = max_cores;
 		msr_uncores = nhmex_msr_uncores;
 		break;
 	default:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index f385189..5b81c18 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
 #include "perf_event.h"
 
 #define UNCORE_PMU_NAME_LEN		32
-#define UNCORE_PMU_HRTIMER_INTERVAL	(60 * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
 
 #define UNCORE_FIXED_EVENT		0xff
 #define UNCORE_PMC_IDX_MAX_GENERIC	8
@@ -230,6 +230,7 @@
 #define NHMEX_S1_MSR_MASK			0xe5a
 
 #define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
+#define NHMEX_S_EVENT_TO_R_PROG_EV		0
 
 /* NHM-EX Mbox */
 #define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
@@ -275,18 +276,12 @@
 		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
 		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
 
-
-#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK	0x1f
-#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK	(0x7 << 5)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK	(0x7 << 8)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR	(1 << 23)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK			\
-		(NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK |	\
-		 NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK |	\
-		 NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK  |	\
-		 NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR)
+#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
 #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7 << (11 + 3 * (n)))
 
+#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7 << (12 + 3 * (n)))
+
 /*
  * use the 9~13 bits to select event If the 7th bit is not set,
  * otherwise use the 19~21 bits to select event.
@@ -368,6 +363,7 @@
 	unsigned num_shared_regs:8;
 	unsigned single_fixed:1;
 	unsigned pair_ctr_ctl:1;
+	unsigned *msr_offsets;
 	struct event_constraint unconstrainted;
 	struct event_constraint *constraints;
 	struct intel_uncore_pmu *pmus;
@@ -485,29 +481,31 @@
 	return idx * 8 + box->pmu->type->perf_ctr;
 }
 
-static inline
-unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
+{
+	struct intel_uncore_pmu *pmu = box->pmu;
+	return pmu->type->msr_offsets ?
+		pmu->type->msr_offsets[pmu->pmu_idx] :
+		pmu->type->msr_offset * pmu->pmu_idx;
+}
+
+static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
 {
 	if (!box->pmu->type->box_ctl)
 		return 0;
-	return box->pmu->type->box_ctl +
-		box->pmu->type->msr_offset * box->pmu->pmu_idx;
+	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
 }
 
-static inline
-unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
 {
 	if (!box->pmu->type->fixed_ctl)
 		return 0;
-	return box->pmu->type->fixed_ctl +
-		box->pmu->type->msr_offset * box->pmu->pmu_idx;
+	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
 }
 
-static inline
-unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
 {
-	return box->pmu->type->fixed_ctr +
-		box->pmu->type->msr_offset * box->pmu->pmu_idx;
+	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
 }
 
 static inline
@@ -515,7 +513,7 @@
 {
 	return box->pmu->type->event_ctl +
 		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-		box->pmu->type->msr_offset * box->pmu->pmu_idx;
+		uncore_msr_box_offset(box);
 }
 
 static inline
@@ -523,7 +521,7 @@
 {
 	return box->pmu->type->perf_ctr +
 		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-		box->pmu->type->msr_offset * box->pmu->pmu_idx;
+		uncore_msr_box_offset(box);
 }
 
 static inline
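
The new uncore_msr_box_offset() helper above picks the per-box MSR offset either from the optional msr_offsets[] table (needed because the Nehalem-EX cbox MSRs are not evenly spaced) or from the old linear pmu_idx * msr_offset stride. A standalone sketch of that selection, with made-up values:

#include <stdio.h>

struct fake_type {
	unsigned int msr_offset;	 /* legacy: fixed stride per box */
	const unsigned int *msr_offsets; /* new: explicit per-box table, or NULL */
};

/* Mirrors uncore_msr_box_offset(): the table, if present, wins. */
static unsigned int box_offset(const struct fake_type *t, int pmu_idx)
{
	return t->msr_offsets ? t->msr_offsets[pmu_idx]
			      : t->msr_offset * pmu_idx;
}

int main(void)
{
	static const unsigned int table[] = { 0x0, 0x80, 0x40, 0xc0 };
	struct fake_type linear = { 0x20, NULL };
	struct fake_type tabled = { 0, table };

	printf("linear stride, box 3 -> %#x\n", box_offset(&linear, 3)); /* 0x60 */
	printf("offset table,  box 3 -> %#x\n", box_offset(&tabled, 3)); /* 0xc0 */
	return 0;
}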
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1f5f1d5..7ad683d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -328,6 +328,7 @@
 				chip->irq_retrigger(data);
 			raw_spin_unlock(&desc->lock);
 		}
+		__this_cpu_write(vector_irq[vector], -1);
 	}
 }
 #endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 1d5d31e..dc1404b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@
 {
 	struct setup_data_node *node;
 	struct setup_data *data;
-	int error = -ENOMEM;
+	int error;
 	struct dentry *d;
 	struct page *pg;
 	u64 pa_data;
@@ -121,8 +121,10 @@
 
 	while (pa_data) {
 		node = kmalloc(sizeof(*node), GFP_KERNEL);
-		if (!node)
+		if (!node) {
+			error = -ENOMEM;
 			goto err_dir;
+		}
 
 		pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
 		if (PageHighMem(pg)) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1df8fb9..e498b18 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -316,6 +316,11 @@
 	addr &= 1;
 	if (addr == 0) {
 		if (val & 0x10) {
+			u8 edge_irr = s->irr & ~s->elcr;
+			int i;
+			bool found = false;
+			struct kvm_vcpu *vcpu;
+
 			s->init4 = val & 1;
 			s->last_irr = 0;
 			s->irr &= s->elcr;
@@ -333,6 +338,18 @@
 			if (val & 0x08)
 				pr_pic_unimpl(
 					"level sensitive irq not supported");
+
+			kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+				if (kvm_apic_accept_pic_intr(vcpu)) {
+					found = true;
+					break;
+				}
+
+
+			if (found)
+				for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+					if (edge_irr & (1 << irq))
+						pic_clear_isr(s, irq);
 		} else if (val & 0x08) {
 			if (val & 0x04)
 				s->poll = 1;
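
The ICW1 path above snapshots the edge-triggered pending bits (irr & ~elcr) before the IRR is reset and, when at least one vCPU accepts PIC interrupts, clears the corresponding in-service bits. A standalone sketch of the bitmask walk (register values below are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned char irr  = 0x15;	/* pending IRQs (illustrative) */
	unsigned char elcr = 0x04;	/* level-triggered pins */
	unsigned char isr  = 0x15;	/* in-service bits */
	unsigned char edge_irr = irr & ~elcr;
	int irq;

	for (irq = 0; irq < 8; irq++)		/* 8 pins per PIC half */
		if (edge_irr & (1 << irq))
			isr &= ~(1 << irq);	/* clear edge-triggered ISR bit */

	printf("edge_irr=%#x isr=%#x\n", edge_irr, isr);
	return 0;
}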
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c39b607..c00f03d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1488,13 +1488,6 @@
 		loadsegment(ds, vmx->host_state.ds_sel);
 		loadsegment(es, vmx->host_state.es_sel);
 	}
-#else
-	/*
-	 * The sysexit path does not restore ds/es, so we must set them to
-	 * a reasonable value ourselves.
-	 */
-	loadsegment(ds, __USER_DS);
-	loadsegment(es, __USER_DS);
 #endif
 	reload_tss();
 #ifdef CONFIG_X86_64
@@ -6370,6 +6363,19 @@
 #endif
 	      );
 
+#ifndef CONFIG_X86_64
+	/*
+	 * The sysexit path does not restore ds/es, so we must set them to
+	 * a reasonable value ourselves.
+	 *
+	 * We can't defer this to vmx_load_host_state() since that function
+	 * may be executed in interrupt context, which saves and restores segments
+	 * around it, nullifying its effect.
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
+
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
 				  | (1 << VCPU_EXREG_RFLAGS)
 				  | (1 << VCPU_EXREG_CPL)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b5950..42bce48 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -925,6 +925,10 @@
 	 */
 	getboottime(&boot);
 
+	if (kvm->arch.kvmclock_offset) {
+		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
+		boot = timespec_sub(boot, ts);
+	}
 	wc.sec = boot.tv_sec;
 	wc.nsec = boot.tv_nsec;
 	wc.version = version;
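
The hunk above adjusts the wall clock handed to the guest by converting kvm->arch.kvmclock_offset (nanoseconds) to a timespec and subtracting it from the host boot time. A self-contained sketch of that arithmetic with hypothetical values (the helpers below stand in for ns_to_timespec() and timespec_sub()):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* Minimal stand-ins for the kernel's ns_to_timespec()/timespec_sub(). */
static struct timespec ns_to_ts(long long ns)
{
	struct timespec ts;

	ts.tv_sec  = ns / NSEC_PER_SEC;
	ts.tv_nsec = ns % NSEC_PER_SEC;
	return ts;
}

static struct timespec ts_sub(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec  = a.tv_sec - b.tv_sec;
	r.tv_nsec = a.tv_nsec - b.tv_nsec;
	if (r.tv_nsec < 0) {
		r.tv_sec--;
		r.tv_nsec += NSEC_PER_SEC;
	}
	return r;
}

int main(void)
{
	struct timespec boot = { .tv_sec = 1000, .tv_nsec = 250000000 };
	long long kvmclock_offset = 1500000000LL;	/* hypothetical: 1.5 s */

	boot = ts_sub(boot, ns_to_ts(kvmclock_offset));
	printf("adjusted boot: %ld.%09ld\n", (long)boot.tv_sec, boot.tv_nsec);
	return 0;
}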
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7..b91e485 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@
 }
 
 /*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@
 	struct vm_area_struct *svma;
 	unsigned long saddr;
 	pte_t *spte = NULL;
+	pte_t *pte;
 
 	if (!vma_shareable(vma, addr))
-		return;
+		return (pte_t *)pmd_alloc(mm, pud, addr);
 
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@
 		put_page(virt_to_page(spte));
 	spin_unlock(&mm->page_table_lock);
 out:
+	pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	mutex_unlock(&mapping->i_mmap_mutex);
+	return pte;
 }
 
 /*
@@ -142,8 +152,9 @@
 		} else {
 			BUG_ON(sz != PMD_SIZE);
 			if (pud_none(*pud))
-				huge_pmd_share(mm, addr, pud);
-			pte = (pte_t *) pmd_alloc(mm, pud, addr);
+				pte = huge_pmd_share(mm, addr, pud);
+			else
+				pte = (pte_t *)pmd_alloc(mm, pud, addr);
 		}
 	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a..a718e0d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@
 
 	/*
 	 * On success we use clflush, when the CPU supports it to
-	 * avoid the wbindv. If the CPU does not support it, in the
-	 * error case, and during early boot (for EFI) we fall back
-	 * to cpa_flush_all (which uses wbinvd):
+	 * avoid the wbinvd. If the CPU does not support it, and in the
+	 * error case, we fall back to cpa_flush_all (which uses
+	 * wbinvd):
 	 */
-	if (early_boot_irqs_disabled)
-		__cpa_flush_all((void *)(long)cache);
-	else if (!ret && cpu_has_clflush) {
+	if (!ret && cpu_has_clflush) {
 		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8..4ddf497 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@
 #endif
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	u64 start, end;
 	int node, pxm;
 
 	if (srat_disabled())
-		return;
+		return -1;
 	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
 		bad_srat();
-		return;
+		return -1;
 	}
 	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
-		return;
+		return -1;
 
 	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
-		return;
+		return -1;
 	start = ma->base_address;
 	end = start + ma->length;
 	pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
 		bad_srat();
-		return;
+		return -1;
 	}
 
 	if (numa_add_memblk(node, start, end) < 0) {
 		bad_srat();
-		return;
+		return -1;
 	}
 
 	node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@
 	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
 	       node, pxm,
 	       (unsigned long long) start, (unsigned long long) end - 1);
+	return 0;
 }
 
 void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2dc29f5..92660eda 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -234,7 +234,22 @@
 	return status;
 }
 
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+					     efi_time_cap_t *tc)
+{
+	unsigned long flags;
+	efi_status_t status;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	efi_call_phys_prelog();
+	status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+				virt_to_phys(tc));
+	efi_call_phys_epilog();
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
 {
 	int real_seconds, real_minutes;
 	efi_status_t 	status;
@@ -263,7 +278,7 @@
 	return 0;
 }
 
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
 {
 	efi_status_t status;
 	efi_time_t eft;
@@ -606,13 +621,18 @@
 	}
 	/*
 	 * We will only need *early* access to the following
-	 * EFI runtime service before set_virtual_address_map
+	 * two EFI runtime services before set_virtual_address_map
 	 * is invoked.
 	 */
+	efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
 	efi_phys.set_virtual_address_map =
 		(efi_set_virtual_address_map_t *)
 		runtime->set_virtual_address_map;
-
+	/*
+	 * Make it possible to call efi_get_time before entering
+	 * virtual mode.
+	 */
+	efi.get_time = phys_efi_get_time;
 	early_iounmap(runtime, sizeof(efi_runtime_services_t));
 
 	return 0;
@@ -700,10 +720,12 @@
 		efi_enabled = 0;
 		return;
 	}
+#ifdef CONFIG_X86_32
 	if (efi_native) {
 		x86_platform.get_wallclock = efi_get_time;
 		x86_platform.set_wallclock = efi_set_rtc_mmss;
 	}
+#endif
 
 #if EFI_DEBUG
 	print_efi_memmap();
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b2d534c..8869287 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -72,7 +72,7 @@
 		   -Wall -Wstrict-prototypes \
 		   -march=i386 -mregparm=3 \
 		   -include $(srctree)/$(src)/../../boot/code16gcc.h \
-		   -fno-strict-aliasing -fomit-frame-pointer \
+		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
 		   $(call cc-option, -ffreestanding) \
 		   $(call cc-option, -fno-toplevel-reorder,\
 			$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 51171ae..a582bfe 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -60,8 +60,8 @@
 51	common	getsockname		sys_getsockname
 52	common	getpeername		sys_getpeername
 53	common	socketpair		sys_socketpair
-54	common	setsockopt		sys_setsockopt
-55	common	getsockopt		sys_getsockopt
+54	64	setsockopt		sys_setsockopt
+55	64	getsockopt		sys_getsockopt
 56	common	clone			stub_clone
 57	common	fork			stub_fork
 58	common	vfork			stub_vfork
@@ -318,7 +318,7 @@
 309	common	getcpu			sys_getcpu
 310	64	process_vm_readv	sys_process_vm_readv
 311	64	process_vm_writev	sys_process_vm_writev
-312	64	kcmp			sys_kcmp
+312	common	kcmp			sys_kcmp
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
@@ -353,3 +353,5 @@
 538	x32	sendmmsg		compat_sys_sendmmsg
 539	x32	process_vm_readv	compat_sys_process_vm_readv
 540	x32	process_vm_writev	compat_sys_process_vm_writev
+541	x32	setsockopt		compat_sys_setsockopt
+542	x32	getsockopt		compat_sys_getsockopt
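
The table edit above stops routing setsockopt/getsockopt through the native entries for x32 tasks and adds compat entries at 541/542, since x32 user space passes 32-bit-compatible option layouts. For orientation only (not part of this patch), x32 syscall numbers are combined with the x32 marker bit when issued:

#include <stdio.h>

#define X32_SYSCALL_BIT	0x40000000	/* __X32_SYSCALL_BIT in the x86-64 ABI */

int main(void)
{
	int x32_setsockopt = 541;	/* number from the table hunk above */

	/* Illustrative: the raw value an x32 task places in rax. */
	printf("raw syscall nr: %#x\n", X32_SYSCALL_BIT | x32_setsockopt);
	return 0;
}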
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 64effdc6..b2e91d4 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -194,6 +194,11 @@
  * boundary violation will require three middle nodes. */
 RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
 
+/* When we populate back during bootup, the number of pages can vary. The
+ * max we have seen is 395979, but that does not mean it can't be more.
+ * Some machines can even have 3GB I/O holes. So let's reserve enough
+ * for 4GB of I/O and E820 holes. */
+RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
 	BUG_ON(pfn >= MAX_P2M_PFN);
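
The PMD_SIZE * 4 reservation above follows from the p2m layout on x86_64: one 4 KiB p2m page holds 4096 / 8 = 512 entries, i.e. it covers 2 MiB of guest address space, so 4 GiB of holes needs 2048 leaf pages = 8 MiB = 4 PMDs. A quick check of that arithmetic (assuming 64-bit p2m entries):

#include <stdio.h>

int main(void)
{
	unsigned long long page_size  = 4096;		/* 4 KiB pages */
	unsigned long long pmd_size   = 2ULL << 20;	/* 2 MiB PMD */
	unsigned long long entry_size = 8;		/* 64-bit p2m entry */
	unsigned long long span       = 4ULL << 30;	/* 4 GiB of holes */

	unsigned long long entries = page_size / entry_size;	/* 512 per page */
	unsigned long long pfns    = span / page_size;		/* 1048576 */
	unsigned long long pages   = pfns / entries;		/* leaf pages */

	printf("p2m leaf pages: %llu, bytes: %llu, PMDs: %llu\n",
	       pages, pages * page_size, pages * page_size / pmd_size);
	return 0;
}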
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index ac70341..d5fdd36 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -69,7 +69,9 @@
 };
 MODULE_DEVICE_TABLE(acpi, ac_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_ac_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
 static struct acpi_driver acpi_ac_driver = {
@@ -313,6 +315,7 @@
 	return result;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_ac_resume(struct device *dev)
 {
 	struct acpi_ac *ac;
@@ -332,6 +335,7 @@
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 	return 0;
 }
+#endif
 
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99a..5de4ec7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@
 /*
  * hwsleep - sleep/wake support (Legacy sleep registers)
  */
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state);
 
 /*
  * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
  */
 void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
 
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake(u8 sleep_state);
 
 /*
  * hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 48518da..94996f9 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@
  * FUNCTION:    acpi_hw_extended_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -100,7 +99,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_sleep(u8 sleep_state)
 {
 	acpi_status status;
 	u8 sleep_type_value;
@@ -125,12 +124,6 @@
 
 	acpi_gbl_system_awake_and_running = FALSE;
 
-	/* Optionally execute _GTS (Going To Sleep) */
-
-	if (flags & ACPI_EXECUTE_GTS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
-	}
-
 	/* Flush caches, as per ACPI specification */
 
 	ACPI_FLUSH_CPU_CACHE();
@@ -172,7 +165,6 @@
  * FUNCTION:    acpi_hw_extended_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -181,7 +173,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
 {
 	acpi_status status;
 	u8 sleep_type_value;
@@ -200,11 +192,6 @@
 				 &acpi_gbl_FADT.sleep_control);
 	}
 
-	/* Optionally execute _BFS (Back From Sleep) */
-
-	if (flags & ACPI_EXECUTE_BFS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
-	}
 	return_ACPI_STATUS(AE_OK);
 }
 
@@ -222,7 +209,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake(u8 sleep_state)
 {
 	ACPI_FUNCTION_TRACE(hw_extended_wake);
 
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 9960fe9..3fddde0 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@
  * FUNCTION:    acpi_hw_legacy_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -64,7 +63,7 @@
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 {
 	struct acpi_bit_register_info *sleep_type_reg_info;
 	struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@
 		return_ACPI_STATUS(status);
 	}
 
-	/* Optionally execute _GTS (Going To Sleep) */
-
-	if (flags & ACPI_EXECUTE_GTS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
-	}
-
 	/* Get current value of PM1A control */
 
 	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@
  * FUNCTION:    acpi_hw_legacy_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -224,7 +216,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
 {
 	acpi_status status;
 	struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@
 		}
 	}
 
-	/* Optionally execute _BFS (Back From Sleep) */
-
-	if (flags & ACPI_EXECUTE_BFS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
-	}
 	return_ACPI_STATUS(status);
 }
 
@@ -288,7 +275,6 @@
  * FUNCTION:    acpi_hw_legacy_wake
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - Reserved, set to zero
  *
  * RETURN:      Status
  *
@@ -297,7 +283,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake(u8 sleep_state)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f8684bf..1f165a7 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@
 
 /* Local prototypes */
 static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
 
 /*
  * Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@
  *
  ******************************************************************************/
 static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
 {
 	acpi_status status;
 	struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@
 	 * use the extended sleep registers
 	 */
 	if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
-		status = sleep_functions->extended_function(sleep_state, flags);
+		status = sleep_functions->extended_function(sleep_state);
 	} else {
 		/* Legacy sleep */
 
-		status = sleep_functions->legacy_function(sleep_state, flags);
+		status = sleep_functions->legacy_function(sleep_state);
 	}
 
 	return (status);
@@ -262,7 +262,7 @@
 	 * For the case where reduced-hardware-only code is being generated,
 	 * we know that only the extended sleep registers are available
 	 */
-	status = sleep_functions->extended_function(sleep_state, flags);
+	status = sleep_functions->extended_function(sleep_state);
 	return (status);
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@
  * FUNCTION:    acpi_enter_sleep_state
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -357,7 +356,7 @@
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
 {
 	acpi_status status;
 
@@ -371,7 +370,7 @@
 	}
 
 	status =
-	    acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+	    acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
 
@@ -391,14 +390,14 @@
  *              Called with interrupts DISABLED.
  *
  ******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
 {
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
 
 	status =
-	    acpi_hw_sleep_dispatch(sleep_state, flags,
+	    acpi_hw_sleep_dispatch(sleep_state,
 				   ACPI_WAKE_PREP_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
@@ -423,8 +422,7 @@
 
 	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
 
-
-	status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+	status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ff2c876..45e3e17 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1052,6 +1052,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /* this is needed to learn about changes made in suspended state */
 static int acpi_battery_resume(struct device *dev)
 {
@@ -1068,6 +1069,7 @@
 	acpi_battery_update(battery);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
 
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 79d4c22..314a3b8 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,7 +78,9 @@
 static int acpi_button_remove(struct acpi_device *device, int type);
 static void acpi_button_notify(struct acpi_device *device, u32 event);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_button_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
 
 static struct acpi_driver acpi_button_driver = {
@@ -310,6 +312,7 @@
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_button_resume(struct device *dev)
 {
 	struct acpi_device *device = to_acpi_device(dev);
@@ -319,6 +322,7 @@
 		return acpi_lid_send_state(device);
 	return 0;
 }
+#endif
 
 static int acpi_button_add(struct acpi_device *device)
 {
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 669d9ee..bc36a47 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -53,8 +53,10 @@
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev);
 static int acpi_fan_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
 
 static struct acpi_driver acpi_fan_driver = {
@@ -184,6 +186,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev)
 {
 	if (!dev)
@@ -207,6 +210,7 @@
 
 	return result;
 }
+#endif
 
 static int __init acpi_fan_init(void)
 {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be..cb31298 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@
 	return 0;
 }
 
+static int __initdata parsed_numa_memblks;
+
 static int __init
 acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 			   const unsigned long end)
@@ -250,8 +252,8 @@
 	acpi_table_print_srat_entry(header);
 
 	/* let architecture-dependent part to do it */
-	acpi_numa_memory_affinity_init(memory_affinity);
-
+	if (!acpi_numa_memory_affinity_init(memory_affinity))
+		parsed_numa_memblks++;
 	return 0;
 }
 
@@ -304,8 +306,10 @@
 
 	acpi_numa_arch_fixup();
 
-	if (cnt <= 0)
-		return cnt ?: -ENOENT;
+	if (cnt < 0)
+		return cnt;
+	else if (!parsed_numa_memblks)
+		return -ENOENT;
 	return 0;
 }
 
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c..72a2c98 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@
 			OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
 	if (pci_msi_enabled())
 		flags |= OSC_MSI_SUPPORT;
-	if (flags != base_flags)
-		acpi_pci_osc_support(root, flags);
+	if (flags != base_flags) {
+		status = acpi_pci_osc_support(root, flags);
+		if (ACPI_FAILURE(status)) {
+			dev_info(root->bus->bridge, "ACPI _OSC support "
+				"notification failed, disabling PCIe ASPM\n");
+			pcie_no_aspm();
+			flags = base_flags;
+		}
+	}
 
 	if (!pcie_ports_disabled
 	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 215ecd0..fc18034 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -67,7 +67,9 @@
 };
 MODULE_DEVICE_TABLE(acpi, power_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_power_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
 
 static struct acpi_driver acpi_power_driver = {
@@ -775,6 +777,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_power_resume(struct device *dev)
 {
 	int result = 0, state;
@@ -803,6 +806,7 @@
 
 	return result;
 }
+#endif
 
 int __init acpi_power_init(void)
 {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index ff8e04f..bfc31cb 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@
 		/* Normal CPU soft online event */
 		} else {
 			acpi_processor_ppc_has_changed(pr, 0);
-			acpi_processor_cst_has_changed(pr);
+			acpi_processor_hotplug(pr);
 			acpi_processor_reevaluate_tstate(pr, action);
 			acpi_processor_tstate_has_changed(pr);
 		}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c0b9aa5..ff0740e 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,6 +988,7 @@
 #endif
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_sbs_resume(struct device *dev)
 {
 	struct acpi_sbs *sbs;
@@ -997,6 +998,7 @@
 	acpi_sbs_callback(sbs);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a7a9c9..fdcdbb6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
 #include "internal.h"
 #include "sleep.h"
 
-u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
-static unsigned int gts, bfs;
-static int set_param_wake_flag(const char *val, struct kernel_param *kp)
-{
-	int ret = param_set_int(val, kp);
-
-	if (ret)
-		return ret;
-
-	if (kp->arg == (const char *)&gts) {
-		if (gts)
-			wake_sleep_flags |= ACPI_EXECUTE_GTS;
-		else
-			wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
-	}
-	if (kp->arg == (const char *)&bfs) {
-		if (bfs)
-			wake_sleep_flags |= ACPI_EXECUTE_BFS;
-		else
-			wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
-	}
-	return ret;
-}
-module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
-module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
 static u8 sleep_states[ACPI_S_STATE_COUNT];
-static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -110,6 +81,7 @@
 
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+static bool pwr_btn_event_pending;
 
 /*
  * The ACPI specification wants us to save NVS memory regions during hibernation
@@ -305,7 +277,7 @@
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
-		status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
+		status = acpi_enter_sleep_state(acpi_state);
 		break;
 
 	case ACPI_STATE_S3:
@@ -319,8 +291,8 @@
 	/* This violates the spec but is required for bug compatibility. */
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(acpi_state);
 
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@
 	ACPI_FLUSH_CPU_CACHE();
 
 	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+	status = acpi_enter_sleep_state(ACPI_STATE_S4);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
@@ -617,8 +589,8 @@
 	 * enable it here.
 	 */
 	acpi_enable();
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 	/* Check the hardware signature */
 	if (facs && s4_hardware_signature != facs->hardware_signature) {
 		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk(KERN_DEBUG "%s called\n", __func__);
 	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
-}
-
-/*
- * ACPI 2.0 created the optional _GTS and _BFS,
- * but industry adoption has been neither rapid nor broad.
- *
- * Linux gets into trouble when it executes poorly validated
- * paths through the BIOS, so disable _GTS and _BFS by default,
- * but do speak up and offer the option to enable them.
- */
-static void __init acpi_gts_bfs_check(void)
-{
-	acpi_handle dummy;
-
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
-	{
-		printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
-		printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
-			"please notify linux-acpi@vger.kernel.org\n");
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
-	{
-		printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
-		printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
-			"please notify linux-acpi@vger.kernel.org\n");
-	}
+	acpi_enter_sleep_state(ACPI_STATE_S5);
 }
 
 int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@
 	 * object can also be evaluated when the system enters S5.
 	 */
 	register_reboot_notifier(&tts_notifier);
-	acpi_gts_bfs_check();
 	return 0;
 }
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a244..7c3f98b 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@
 {
 	int result = 0;
 
-	if (!strncmp(val, "enable", strlen("enable"))) {
+	if (!strncmp(val, "enable", sizeof("enable") - 1)) {
 		result = acpi_debug_trace(trace_method_name, trace_debug_level,
 					  trace_debug_layer, 0);
 		if (result)
@@ -181,7 +181,7 @@
 		goto exit;
 	}
 
-	if (!strncmp(val, "disable", strlen("disable"))) {
+	if (!strncmp(val, "disable", sizeof("disable") - 1)) {
 		int name = 0;
 		result = acpi_debug_trace((char *)&name, trace_debug_level,
 					  trace_debug_layer, 0);
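
The sysfs.c change above swaps strlen() for sizeof() - 1 on the string literals, which makes the length a compile-time constant while comparing the same number of characters. A quick standalone check of the equivalence:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* For a string literal, sizeof counts the trailing NUL, hence -1. */
	assert(sizeof("enable") - 1 == strlen("enable"));
	assert(sizeof("disable") - 1 == strlen("disable"));

	printf("enable: %zu, disable: %zu\n",
	       sizeof("enable") - 1, sizeof("disable") - 1);
	return 0;
}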
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 9fe90e9..edda74a 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -106,7 +106,9 @@
 };
 MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_thermal_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
 
 static struct acpi_driver acpi_thermal_driver = {
@@ -1041,6 +1043,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_thermal_resume(struct device *dev)
 {
 	struct acpi_thermal *tz;
@@ -1075,6 +1078,7 @@
 
 	return AE_OK;
 }
+#endif
 
 static int thermal_act(const struct dmi_system_id *d) {
 
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d438601..96cce6d 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@
 	{  
 		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
 			    dev->number);  
-		return error;  
+		return -ENOMEM;
 	}  
 	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
 			dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f338037..5e6e00b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1865,6 +1865,7 @@
 		 struct va_format *vaf)
 {
 	char dict[128];
+	const char *level_extra = "";
 	size_t dictlen = 0;
 	const char *subsys;
 
@@ -1911,10 +1912,14 @@
 				    "DEVICE=+%s:%s", subsys, dev_name(dev));
 	}
 skip:
+	if (level[2])
+		level_extra = &level[2]; /* skip past KERN_SOH "L" */
+
 	return printk_emit(0, level[1] - '0',
 			   dictlen ? dict : NULL, dictlen,
-			   "%s %s: %pV",
-			   dev_driver_string(dev), dev_name(dev), vaf);
+			   "%s %s: %s%pV",
+			   dev_driver_string(dev), dev_name(dev),
+			   level_extra, vaf);
 }
 EXPORT_SYMBOL(__dev_printk);
 
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff..eb78e96 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@
  */
 int pm_clk_create(struct device *dev)
 {
-	int ret = dev_pm_get_subsys_data(dev);
-	return ret < 0 ? ret : 0;
+	return dev_pm_get_subsys_data(dev);
 }
 
 /**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085c..39c3252 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
 int dev_pm_get_subsys_data(struct device *dev)
 {
 	struct pm_subsys_data *psd;
-	int ret = 0;
 
 	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
 	if (!psd)
@@ -40,7 +39,6 @@
 		dev->power.subsys_data = psd;
 		pm_clk_init(dev);
 		psd = NULL;
-		ret = 1;
 	}
 
 	spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@
 	/* kfree() verifies that its argument is nonzero. */
 	kfree(psd);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 5989487..7d9c1cb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -147,6 +147,8 @@
 	    || (dev->power.request_pending
 			&& dev->power.request == RPM_REQ_RESUME))
 		retval = -EAGAIN;
+	else if (__dev_pm_qos_read_value(dev) < 0)
+		retval = -EPERM;
 	else if (dev->power.runtime_status == RPM_SUSPENDED)
 		retval = 1;
 
@@ -388,7 +390,6 @@
 		goto repeat;
 	}
 
-	dev->power.deferred_resume = false;
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
@@ -403,12 +404,6 @@
 		goto out;
 	}
 
-	if (__dev_pm_qos_read_value(dev) < 0) {
-		/* Negative PM QoS constraint means "never suspend". */
-		retval = -EPERM;
-		goto out;
-	}
-
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
 	if (dev->pm_domain)
@@ -440,6 +435,7 @@
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
+		dev->power.deferred_resume = false;
 		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
@@ -584,6 +580,7 @@
 		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 			atomic_inc(&dev->parent->power.child_count);
 			spin_unlock(&dev->parent->power.lock);
+			retval = 1;
 			goto no_callback;	/* Assume success. */
 		}
 		spin_unlock(&dev->parent->power.lock);
@@ -664,7 +661,7 @@
 	}
 	wake_up_all(&dev->power.wait_queue);
 
-	if (!retval)
+	if (retval >= 0)
 		rpm_idle(dev, RPM_ASYNC);
 
  out:
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 06b3207a..a533af2 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -48,12 +48,12 @@
 
 config BCMA_SFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_NFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_DRIVER_GMAC_CMN
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 3cf9cc9..169fc58 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -54,6 +54,7 @@
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 574d624..9042781 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+	.name		= "bcma_nflash",
+	.num_resources	= 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "NAND flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+	    cc->core->id.rev != 0x38) {
+		bcma_err(bus, "NAND flash on unsupported board!\n");
+		return -ENOTSUPP;
+	}
+
+	if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+		bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+		return -ENODEV;
+	}
+
+	cc->nflash.present = true;
+
+	/* Prepare platform device, but don't register it yet. It's too early;
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
 	return 0;
 }
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index c9a4f46..8b8f2f3 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -101,7 +101,7 @@
 	bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
 }
 
-void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
+static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 {
 	struct bcma_bus *bus = cc->core->bus;
 
@@ -257,7 +257,7 @@
 }
 
 /* query bus clock frequency for PMU-enabled chipcommon */
-u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
 {
 	struct bcma_bus *bus = cc->core->bus;
 
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 6e157a5..2c4eec2 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,15 +5,132 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+	.name	= "bcma_sflash",
+	.start	= BCMA_SFLASH,
+	.end	= 0,
+	.flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+	.name		= "bcma_sflash",
+	.resource	= &bcma_sflash_resource,
+	.num_resources	= 1,
+};
+
+struct bcma_sflash_tbl_e {
+	char *name;
+	u32 id;
+	u32 blocksize;
+	u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+	{ "", 0x14, 0x10000, 32, },
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+	{ 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+	int i;
+	bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+			BCMA_CC_FLASHCTL_START | opcode);
+	for (i = 0; i < 1000; i++) {
+		if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+		      BCMA_CC_FLASHCTL_BUSY))
+			return;
+		cpu_relax();
+	}
+	bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "Serial flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+	struct bcma_sflash *sflash = &cc->sflash;
+	struct bcma_sflash_tbl_e *e;
+	u32 id, id2;
+
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		switch (id) {
+		case 0xbf:
+			for (e = bcma_sflash_sst_tbl; e->name; e++) {
+				if (e->id == id2)
+					break;
+			}
+			break;
+		default:
+			for (e = bcma_sflash_st_tbl; e->name; e++) {
+				if (e->id == id)
+					break;
+			}
+			break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+			return -ENOTSUPP;
+		}
+
+		break;
+	case BCMA_CC_FLASHT_ATSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+		for (e = bcma_sflash_at_tbl; e->name; e++) {
+			if (e->id == id)
+				break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+			return -ENOTSUPP;
+		}
+
+		break;
+	default:
+		bcma_err(bus, "Unsupported flash type\n");
+		return -ENOTSUPP;
+	}
+
+	sflash->window = BCMA_SFLASH;
+	sflash->blocksize = e->blocksize;
+	sflash->numblocks = e->numblocks;
+	sflash->size = sflash->blocksize * sflash->numblocks;
+	sflash->present = true;
+
+	bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+		  e->name, sflash->size / 1024, sflash->blocksize,
+		  sflash->numblocks);
+
+	/* Prepare platform device, but don't register it yet. It's too early;
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+					  sflash->size;
+	bcma_sflash_dev.dev.platform_data = sflash;
+
 	return 0;
 }
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2..f7b0af7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -77,8 +77,8 @@
 }
 
 #ifdef CONFIG_BCMA_BLOCKIO
-void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
-			      size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+				     size_t count, u16 offset, u8 reg_width)
 {
 	void __iomem *addr = core->bus->mmio + offset;
 	if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@
 	}
 }
 
-void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
-			       size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_write(struct bcma_device *core,
+				      const void *buffer, size_t count,
+				      u16 offset, u8 reg_width)
 {
 	void __iomem *addr = core->bus->mmio + offset;
 	if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@
 	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
 }
 
-const struct bcma_host_ops bcma_host_pci_ops = {
+static const struct bcma_host_ops bcma_host_pci_ops = {
 	.read8		= bcma_host_pci_read8,
 	.read16		= bcma_host_pci_read16,
 	.read32		= bcma_host_pci_read32,
@@ -272,6 +273,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
 	{ 0, },
 };
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 3c381fb..3475e60 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -143,7 +143,7 @@
 	writel(value, core->io_wrap + offset);
 }
 
-const struct bcma_host_ops bcma_host_soc_ops = {
+static const struct bcma_host_ops bcma_host_soc_ops = {
 	.read8		= bcma_host_soc_read8,
 	.read16		= bcma_host_soc_read16,
 	.read32		= bcma_host_soc_read32,
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 758af9c..a8f570d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@
 		dev_id++;
 	}
 
+#ifdef CONFIG_BCMA_SFLASH
+	if (bus->drv_cc.sflash.present) {
+		err = platform_device_register(&bcma_sflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering serial flash\n");
+	}
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+	if (bus->drv_cc.nflash.present) {
+		err = platform_device_register(&bcma_nflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering NAND flash\n");
+	}
+#endif
+
 	return 0;
 }
 
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d9..9ea4627 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@
 		/* for these chips OTP is always available */
 		present = true;
 		break;
-
+	case BCMA_CHIP_ID_BCM43228:
+		present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
+		break;
 	default:
 		present = false;
 		break;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index acda773..38aa6dd 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -763,16 +763,7 @@
 		{
 			case CMD_TARGET_STATUS:
 				/* Pass it up to the upper layers... */
-				if( ei->ScsiStatus)
-                		{
-#if 0
-                    			printk(KERN_WARNING "cciss: cmd %p "
-						"has SCSI Status = %x\n",
-						c, ei->ScsiStatus);
-#endif
-					cmd->result |= (ei->ScsiStatus << 1);
-                		}
-				else {  /* scsi status is zero??? How??? */
+				if (!ei->ScsiStatus) {
 					
 	/* Ordinarily, this case should never happen, but there is a bug
 	   in some released firmware revisions that allows it to happen
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2e0e7fc..dbe6135 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@
 }
 
 /**
- * drbd_congested() - Callback for pdflush
+ * drbd_congested() - Callback for the flusher thread
  * @congested_data:	User data
- * @bdi_bits:		Bits pdflush is currently interested in
+ * @bdi_bits:		Bits the BDI flusher thread is currently interested in
  *
  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
  */
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 10308cd..11f36e5 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -79,6 +79,7 @@
 	{ USB_DEVICE(0x13d3, 0x3362) },
 	{ USB_DEVICE(0x0CF3, 0xE004) },
 	{ USB_DEVICE(0x0930, 0x0219) },
+	{ USB_DEVICE(0x0489, 0xe057) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -104,6 +105,7 @@
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 37ae175..364f82b 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -177,7 +177,7 @@
 	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
 		return -ENODEV;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
 	if (!data) {
 		BT_ERR("Can't allocate memory for data structure");
 		return -ENOMEM;
@@ -189,14 +189,12 @@
 	data->urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!data->urb) {
 		BT_ERR("Can't allocate URB");
-		kfree(data);
 		return -ENOMEM;
 	}
 
 	if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
 		BT_ERR("Mini driver request failed");
 		usb_free_urb(data->urb);
-		kfree(data);
 		return -EIO;
 	}
 
@@ -209,7 +207,6 @@
 		BT_ERR("Can't allocate memory for mini driver");
 		release_firmware(firmware);
 		usb_free_urb(data->urb);
-		kfree(data);
 		return -ENOMEM;
 	}
 
@@ -224,7 +221,6 @@
 		BT_ERR("Firmware request failed");
 		usb_free_urb(data->urb);
 		kfree(data->buffer);
-		kfree(data);
 		return -EIO;
 	}
 
@@ -236,7 +232,6 @@
 		release_firmware(firmware);
 		usb_free_urb(data->urb);
 		kfree(data->buffer);
-		kfree(data);
 		return -ENOMEM;
 	}
 
@@ -271,7 +266,6 @@
 	usb_free_urb(data->urb);
 	kfree(data->fw_data);
 	kfree(data->buffer);
-	kfree(data);
 }
 
 static struct usb_driver bcm203x_driver = {
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 32e8251..995aee9 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -653,7 +653,7 @@
 	}
 
 	/* Initialize control structure and load firmware */
-	data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
+	data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
 	if (!data) {
 		BT_ERR("Can't allocate memory for control structure");
 		goto done;
@@ -674,7 +674,7 @@
 
 	if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
 		BT_ERR("Firmware request failed");
-		goto error;
+		goto done;
 	}
 
 	BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@
 	hdev = hci_alloc_dev();
 	if (!hdev) {
 		BT_ERR("Can't allocate HCI device");
-		goto error;
+		goto done;
 	}
 
 	data->hdev = hdev;
@@ -708,7 +708,7 @@
 	if (hci_register_dev(hdev) < 0) {
 		BT_ERR("Can't register HCI device");
 		hci_free_dev(hdev);
-		goto error;
+		goto done;
 	}
 
 	usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@
 release:
 	release_firmware(firmware);
 
-error:
-	kfree(data);
-
 done:
 	return -EIO;
 }
@@ -741,7 +738,6 @@
 
 	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
-	kfree(data);
 }
 
 static struct usb_driver bfusb_driver = {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 66c3a67..0c0838d 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -849,7 +849,7 @@
 	bluecard_info_t *info;
 
 	/* Create new info device */
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -864,10 +864,7 @@
 
 static void bluecard_detach(struct pcmcia_device *link)
 {
-	bluecard_info_t *info = link->priv;
-
 	bluecard_release(link);
-	kfree(info);
 }
 
 
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 29caaed..2fe4a80 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -443,7 +443,7 @@
 	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
 		return -ENODEV;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -453,10 +453,8 @@
 	init_usb_anchor(&data->rx_anchor);
 
 	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(data);
+	if (!hdev)
 		return -ENOMEM;
-	}
 
 	hdev->bus = HCI_USB;
 	hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		hci_free_dev(hdev);
-		kfree(data);
 		return err;
 	}
 
@@ -500,7 +497,6 @@
 	hci_free_dev(data->hdev);
 	kfree_skb(data->rx_skb[0]);
 	kfree_skb(data->rx_skb[1]);
-	kfree(data);
 }
 
 static struct usb_driver bpa10x_driver = {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 8925b6d..7ffd3f4 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -638,7 +638,7 @@
 	bt3c_info_t *info;
 
 	/* Create new info device */
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -654,10 +654,7 @@
 
 static void bt3c_detach(struct pcmcia_device *link)
 {
-	bt3c_info_t *info = link->priv;
-
 	bt3c_release(link);
-	kfree(info);
 }
 
 static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 6a9e971..03b3acb 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -956,11 +956,9 @@
 	BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
 			id->vendor, id->device, id->class, func->num);
 
-	card = kzalloc(sizeof(*card), GFP_KERNEL);
-	if (!card) {
-		ret = -ENOMEM;
-		goto done;
-	}
+	card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
 
 	card->func = func;
 
@@ -974,8 +972,7 @@
 
 	if (btmrvl_sdio_register_dev(card) < 0) {
 		BT_ERR("Failed to register BT device!");
-		ret = -ENODEV;
-		goto free_card;
+		return -ENODEV;
 	}
 
 	/* Disable the interrupts on the card */
@@ -1023,9 +1020,6 @@
 	btmrvl_sdio_disable_host_int(card);
 unreg_dev:
 	btmrvl_sdio_unregister_dev(card);
-free_card:
-	kfree(card);
-done:
 	return ret;
 }
 
@@ -1047,7 +1041,6 @@
 			BT_DBG("unregester dev");
 			btmrvl_sdio_unregister_dev(card);
 			btmrvl_remove_card(card->priv);
-			kfree(card);
 		}
 	}
 }
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index e10ea03..4a99097 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -304,7 +304,7 @@
 		tuple = tuple->next;
 	}
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -315,10 +315,8 @@
 	skb_queue_head_init(&data->txq);
 
 	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(data);
+	if (!hdev)
 		return -ENOMEM;
-	}
 
 	hdev->bus = HCI_SDIO;
 	hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		hci_free_dev(hdev);
-		kfree(data);
 		return err;
 	}
 
@@ -366,7 +363,6 @@
 	hci_unregister_dev(hdev);
 
 	hci_free_dev(hdev);
-	kfree(data);
 }
 
 static struct sdio_driver btsdio_driver = {
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 21e803a..2f510a8 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -567,7 +567,7 @@
 	btuart_info_t *info;
 
 	/* Create new info device */
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -583,10 +583,7 @@
 
 static void btuart_detach(struct pcmcia_device *link)
 {
-	btuart_info_t *info = link->priv;
-
 	btuart_release(link);
-	kfree(info);
 }
 
 static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e272214..fa2a7d5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -98,6 +98,7 @@
 	{ USB_DEVICE(0x0a5c, 0x21e6) },
 	{ USB_DEVICE(0x0a5c, 0x21e8) },
 	{ USB_DEVICE(0x0a5c, 0x21f3) },
+	{ USB_DEVICE(0x0a5c, 0x21f4) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
 	/* Foxconn - Hon Hai */
@@ -133,6 +134,7 @@
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -952,7 +954,7 @@
 			return -ENODEV;
 	}
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -975,10 +977,8 @@
 		}
 	}
 
-	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
-		kfree(data);
+	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
 		return -ENODEV;
-	}
 
 	data->cmdreq_type = USB_TYPE_CLASS;
 
@@ -998,10 +998,8 @@
 	init_usb_anchor(&data->deferred);
 
 	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(data);
+	if (!hdev)
 		return -ENOMEM;
-	}
 
 	hdev->bus = HCI_USB;
 	hci_set_drvdata(hdev, data);
@@ -1069,7 +1067,6 @@
 							data->isoc, data);
 		if (err < 0) {
 			hci_free_dev(hdev);
-			kfree(data);
 			return err;
 		}
 	}
@@ -1077,7 +1074,6 @@
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		hci_free_dev(hdev);
-		kfree(data);
 		return err;
 	}
 
@@ -1110,7 +1106,6 @@
 		usb_driver_release_interface(&btusb_driver, data->isoc);
 
 	hci_free_dev(hdev);
-	kfree(data);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 8869469..4ad7b35 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -297,16 +297,14 @@
 	struct hci_dev *hdev;
 	int err;
 
-	hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL);
+	hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
 	if (!hst)
 		return -ENOMEM;
 
 	/* Expose "hciX" device to user space */
 	hdev = hci_alloc_dev();
-	if (!hdev) {
-		kfree(hst);
+	if (!hdev)
 		return -ENOMEM;
-	}
 
 	BT_DBG("hdev %p", hdev);
 
@@ -321,7 +319,6 @@
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		BT_ERR("Can't register HCI device error %d", err);
-		kfree(hst);
 		hci_free_dev(hdev);
 		return err;
 	}
@@ -347,7 +344,6 @@
 	hci_unregister_dev(hdev);
 
 	hci_free_dev(hdev);
-	kfree(hst);
 
 	dev_set_drvdata(&pdev->dev, NULL);
 	return 0;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 97a7784..036cb36 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -550,7 +550,7 @@
 	dtl1_info_t *info;
 
 	/* Create new info device */
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -569,7 +569,6 @@
 
 	dtl1_close(info);
 	pcmcia_disable_device(link);
-	kfree(info);
 }
 
 static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5722642..6f007b6 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -239,16 +239,45 @@
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG		0x016A
 #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB		0x0F00 /* VLV1 */
 #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG		0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB				0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB			0x0400 /* Desktop */
 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG		0x0402
 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG		0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB			0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG	0x0422
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB		0x0404 /* Mobile */
 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG		0x0406
 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG		0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB			0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG	0x0426
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB		0x0408 /* Server */
 #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG		0x040a
 #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG		0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV		0x0c16 /* SDV */
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB			0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG	0x042a
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB		0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG	0x0C02
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG	0x0C12
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG	0x0C22
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG	0x0C06
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG	0x0C16
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG	0x0C26
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG	0x0C0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG	0x0C1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG	0x0C2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG	0x0A02
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG	0x0A12
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG	0x0A22
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG	0x0A06
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG	0x0A16
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG	0x0A26
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG	0x0A0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG	0x0A1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG	0x0A2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG	0x0D12
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG	0x0D22
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG	0x0D32
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG	0x0D16
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG	0x0D26
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG	0x0D36
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG	0x0D1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG	0x0D2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG	0x0D3A
 
 #endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9ed92ef..08fc5cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1502,15 +1502,73 @@
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d706bd0e..4fbdceb 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
 static int omap_rng_suspend(struct device *dev)
 {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 89682fa..c4be351 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -807,6 +807,7 @@
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static int tpm_tis_resume(struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -816,6 +817,7 @@
 
 	return tpm_pm_resume(dev);
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
 
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 540795c..d927938 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -53,7 +53,7 @@
 #define MFGPT_PERIODIC (MFGPT_HZ / HZ)
 
 /*
- * The MFPGT timers on the CS5536 provide us with suitable timers to use
+ * The MFGPT timers on the CS5536 provide us with suitable timers to use
  * as clock event sources - not as good as a HPET or APIC, but certainly
  * better than the PIT.  This isn't a general purpose MFGPT driver, but
  * a simplified one designed specifically to act as a clock event source.
@@ -144,7 +144,7 @@
 
 	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
 	if (!timer) {
-		printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n");
+		printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
 		return -ENODEV;
 	}
 	cs5535_event_clock = timer;
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac..503996a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@
 					mem_resource->address_length);
 	if (pcch_virt_addr == NULL) {
 		pr_debug("probe: could not map shared mem region\n");
+		ret = -ENOMEM;
 		goto out_free;
 	}
 	pcch_hdr = pcch_virt_addr;
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 2c9bf26..3265844 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -678,10 +678,22 @@
 	int cpu = (unsigned long)hcpu;
 	struct cpuidle_device *dev;
 
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+	case CPU_DOWN_PREPARE:
+	case CPU_ONLINE:
+	case CPU_DEAD:
+	case CPU_UP_CANCELED:
+	case CPU_DOWN_FAILED:
+		break;
+	default:
+		return NOTIFY_OK;
+	}
+
 	mutex_lock(&cpuidle_lock);
 
 	dev = per_cpu(cpuidle_devices, cpu);
-	if (!dev->coupled)
+	if (!dev || !dev->coupled)
 		goto out;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3c..5084975 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@
 	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	void __iomem			*base;
-	struct clk			*dma_clk;
+	struct clk			*dma_ahb;
+	struct clk			*dma_ipg;
 	spinlock_t			lock;
 	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@
 		return 0;
 	}
 
-	imxdma->dma_clk = clk_get(NULL, "dma");
-	if (IS_ERR(imxdma->dma_clk))
-		return PTR_ERR(imxdma->dma_clk);
-	clk_enable(imxdma->dma_clk);
+	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(imxdma->dma_ipg)) {
+		ret = PTR_ERR(imxdma->dma_ipg);
+		goto err_clk;
+	}
+
+	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(imxdma->dma_ahb)) {
+		ret = PTR_ERR(imxdma->dma_ahb);
+		goto err_clk;
+	}
+
+	clk_prepare_enable(imxdma->dma_ipg);
+	clk_prepare_enable(imxdma->dma_ahb);
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@
 		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			kfree(imxdma);
-			return ret;
+			goto err_enable;
 		}
 
 		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
 			free_irq(MX1_DMA_INT, NULL);
-			kfree(imxdma);
-			return ret;
+			goto err_enable;
 		}
 	}
 
@@ -1094,7 +1103,10 @@
 		free_irq(MX1_DMA_INT, NULL);
 		free_irq(MX1_DMA_ERR, NULL);
 	}
-
+err_enable:
+	clk_disable_unprepare(imxdma->dma_ipg);
+	clk_disable_unprepare(imxdma->dma_ahb);
+err_clk:
 	kfree(imxdma);
 	return ret;
 }
@@ -1114,7 +1126,9 @@
 		free_irq(MX1_DMA_ERR, NULL);
 	}
 
-        kfree(imxdma);
+	clk_disable_unprepare(imxdma->dma_ipg);
+	clk_disable_unprepare(imxdma->dma_ahb);
+	kfree(imxdma);
 
         return 0;
 }
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d52dbc6..24acd71 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1119,15 +1119,21 @@
 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	struct tegra_dma *tdma = tdc->tdma;
+	int ret;
 
 	dma_cookie_init(&tdc->dma_chan);
 	tdc->config_init = false;
-	return 0;
+	ret = clk_prepare_enable(tdma->dma_clk);
+	if (ret < 0)
+		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
+	return ret;
 }
 
 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	struct tegra_dma *tdma = tdc->tdma;
 
 	struct tegra_dma_desc *dma_desc;
 	struct tegra_dma_sg_req *sg_req;
@@ -1163,6 +1169,7 @@
 		list_del(&sg_req->node);
 		kfree(sg_req);
 	}
+	clk_disable_unprepare(tdma->dma_clk);
 }
 
 /* Tegra20 specific DMA controller information */
@@ -1255,6 +1262,13 @@
 		}
 	}
 
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(tdma->dma_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+		goto err_pm_disable;
+	}
+
 	/* Reset DMA controller */
 	tegra_periph_reset_assert(tdma->dma_clk);
 	udelay(2);
@@ -1265,6 +1279,8 @@
 	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
 	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
 
+	clk_disable_unprepare(tdma->dma_clk);
+
 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe3db45..3cc152e 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -107,7 +107,8 @@
 	if (ret < 0)
 		return ret;
 
-	ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name);
+	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
+				    pdev->name);
 	if (ret < 0)
 		goto err;
 
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 150d976..ae37181 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -266,7 +266,7 @@
 	return 0;
 }
 
-static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p)
+static void em_gio_irq_domain_cleanup(struct em_gio_priv *p)
 {
 	struct gpio_em_config *pdata = p->pdev->dev.platform_data;
 
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754..202a992 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@
 	resource_size_t start, len;
 	struct lnw_gpio *lnw;
 	u32 gpio_base;
-	int retval = 0;
+	int retval;
 	int ngpio = id->driver_data;
 
 	retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@
 	base = ioremap_nocache(start, len);
 	if (!base) {
 		dev_err(&pdev->dev, "error mapping bar1\n");
+		retval = -EFAULT;
 		goto err3;
 	}
 	gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@
 
 	lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
 					    &lnw_gpio_irq_ops, lnw);
-	if (!lnw->domain)
+	if (!lnw->domain) {
+		retval = -ENOMEM;
 		goto err3;
+	}
 
 	lnw->reg_base = base;
 	lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f..b389862 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@
 	if (offset < 20)
 		return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
 
-	return INTEL_MSIC_GPIO1HV0CTLO + offset + 20;
+	return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
 }
 
 static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4db460b..80f44bb 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -465,9 +465,8 @@
 		goto out_iounmap;
 
 	port->bgc.gc.to_irq = mxc_gpio_to_irq;
-	port->bgc.gc.base = pdev->id * 32;
-	port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
-	port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+	port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+					     pdev->id * 32;
 
 	err = gpiochip_add(&port->bgc.gc);
 	if (err)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63..9cac88a 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@
 
 #ifdef CONFIG_OF
 static struct irq_domain *domain;
+static struct device_node *pxa_gpio_of_node;
 #endif
 
 struct pxa_gpio_chip {
@@ -277,6 +278,24 @@
 				(value ? GPSR_OFFSET : GPCR_OFFSET));
 }
 
+#ifdef CONFIG_OF_GPIO
+static int pxa_gpio_of_xlate(struct gpio_chip *gc,
+			     const struct of_phandle_args *gpiospec,
+			     u32 *flags)
+{
+	if (gpiospec->args[0] > pxa_last_gpio)
+		return -EINVAL;
+
+	if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpiospec->args[1];
+
+	return gpiospec->args[0] % 32;
+}
+#endif
+
 static int __devinit pxa_init_gpio_chip(int gpio_end,
 					int (*set_wake)(unsigned int, unsigned int))
 {
@@ -304,6 +323,11 @@
 		c->get = pxa_gpio_get;
 		c->set = pxa_gpio_set;
 		c->to_irq = pxa_gpio_to_irq;
+#ifdef CONFIG_OF_GPIO
+		c->of_node = pxa_gpio_of_node;
+		c->of_xlate = pxa_gpio_of_xlate;
+		c->of_gpio_n_cells = 2;
+#endif
 
 		/* number of GPIOs on last bank may be less than 32 */
 		c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@
 	return count;
 }
 
+#ifdef CONFIG_OF
 static struct of_device_id pxa_gpio_dt_ids[] = {
 	{ .compatible = "mrvl,pxa-gpio" },
 	{ .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@
 
 const struct irq_domain_ops pxa_irq_domain_ops = {
 	.map	= pxa_irq_domain_map,
+	.xlate	= irq_domain_xlate_twocell,
 };
 
-#ifdef CONFIG_OF
 static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
 {
 	int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@
 	}
 	domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
 				       &pxa_irq_domain_ops, NULL);
+	pxa_gpio_of_node = np;
 	return 0;
 err:
 	iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@
 	.probe		= pxa_gpio_probe,
 	.driver		= {
 		.name	= "pxa-gpio",
-		.of_match_table = pxa_gpio_dt_ids,
+		.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
 	},
 };
 
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 92f7b2b..ba126cc 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@
 		},
 	}, {
 		.chip	= {
-			.base	= EXYNOS5_GPC4(0),
-			.ngpio	= EXYNOS5_GPIO_C4_NR,
-			.label	= "GPC4",
-		},
-	}, {
-		.chip	= {
 			.base	= EXYNOS5_GPD0(0),
 			.ngpio	= EXYNOS5_GPIO_D0_NR,
 			.label	= "GPD0",
@@ -2513,6 +2507,12 @@
 			.label	= "GPY6",
 		},
 	}, {
+		.chip	= {
+			.base	= EXYNOS5_GPC4(0),
+			.ngpio	= EXYNOS5_GPIO_C4_NR,
+			.label	= "GPC4",
+		},
+	}, {
 		.config	= &samsung_gpio_cfgs[9],
 		.irq_base = IRQ_EINT(0),
 		.chip	= {
@@ -2836,7 +2836,7 @@
 	}
 
 	/* need to set base address for gpc4 */
-	exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+	exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
 
 	/* need to set base address for gpx */
 	chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8..8707d45 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@
 			break;
 
 		default:
-			return -ENODEV;
+			err = -ENODEV;
+			goto err_sch_gpio_core;
 	}
 
 	sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 23120c0..90e2808 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,6 +22,7 @@
 config DRM_USB
 	tristate
 	depends on DRM
+	depends on USB_ARCH_HAS_HCD
 	select USB
 
 config DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28..0303935d 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@
 {
 	const struct firmware *fw;
 	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid;
+	u8 *fwdata = NULL, *edid, *new_edid;
 	int fwsize, expected;
 	int builtin = 0, err = 0;
 	int i, valid_extensions = 0;
@@ -195,12 +195,14 @@
 		    "\"%s\" for connector \"%s\"\n", valid_extensions,
 		    edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
-		edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
 		    GFP_KERNEL);
-		if (edid == NULL) {
+		if (new_edid == NULL) {
 			err = -ENOMEM;
+			kfree(edid);
 			goto relfw_out;
 		}
+		edid = new_edid;
 	}
 
 	connector->display_info.raw_edid = edid;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ed22612..a24ffbe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,11 +346,40 @@
 	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
 	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
 	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
 	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
 	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
+	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
+	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
+	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index da8b01f..a9d58d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -451,7 +451,6 @@
 	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
 	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
-	int ret;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5af631e..ff2819e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@
 	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = target_i915_obj->gtt_offset;
 
+	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+	 * pipe_control writes because the gpu doesn't properly redirect them
+	 * through the ppgtt for non_secure batchbuffers. */
+	if (unlikely(IS_GEN6(dev) &&
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+	    !target_i915_obj->has_global_gtt_mapping)) {
+		i915_gem_gtt_bind_object(target_i915_obj,
+					 target_i915_obj->cache_level);
+	}
+
 	/* The target buffer should have appeared before us in the
 	 * exec_object list, so it should have a GTT space bound by now.
 	 */
@@ -399,16 +409,6 @@
 		io_mapping_unmap_atomic(reloc_page);
 	}
 
-	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
-	 * pipe_control writes because the gpu doesn't properly redirect them
-	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
-	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !target_i915_obj->has_global_gtt_mapping)) {
-		i915_gem_gtt_bind_object(target_i915_obj,
-					 target_i915_obj->cache_level);
-	}
-
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a4..ee9b68f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -361,7 +361,8 @@
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->mm.gtt->needs_dmar)
+	/* don't map imported dma buf objects */
+	if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
 		return intel_gtt_map_memory(obj->pages,
 					    obj->base.size >> PAGE_SHIFT,
 					    &obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2f5388af..7631807 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,3 +225,14 @@
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
 }
+#else
+void i915_setup_sysfs(struct drm_device *dev)
+{
+	return;
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+	return;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f615976..a69a3d0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -869,6 +869,7 @@
 	unsigned long bestppm, ppm, absppm;
 	int dotclk, flag;
 
+	flag = 0;
 	dotclk = target * 1000;
 	bestppm = 1000000;
 	ppm = absppm = 0;
@@ -3753,17 +3754,6 @@
 			continue;
 		}
 
-		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-			/* Use VBT settings if we have an eDP panel */
-			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
-			if (edp_bpc < display_bpc) {
-				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
-				display_bpc = edp_bpc;
-			}
-			continue;
-		}
-
 		/* Not one of the known troublemakers, check the EDID */
 		list_for_each_entry(connector, &dev->mode_config.connector_list,
 				    head) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0a56b9a..a6c426a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1174,10 +1174,14 @@
 	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	/* We need to switch off panel power _and_ force vdd, for otherwise some
+	 * panels get very unhappy and cease to work. */
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
+	intel_dp->want_panel_vdd = false;
+
 	ironlake_wait_panel_off(intel_dp);
 }
 
@@ -1287,11 +1291,9 @@
 	 * ensure that we have vdd while we switch off the panel. */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
-	ironlake_edp_panel_off(intel_dp);
-
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	ironlake_edp_panel_off(intel_dp);
 	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1326,11 +1328,9 @@
 		/* Switching the panel off requires vdd. */
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
-		ironlake_edp_panel_off(intel_dp);
-
 		intel_dp_sink_dpms(intel_dp, mode);
+		ironlake_edp_panel_off(intel_dp);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_vdd_off(intel_dp, false);
 
 		if (is_cpu_edp(intel_dp))
 			ironlake_edp_pll_off(encoder);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8435355..132ab51 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -46,15 +46,16 @@
 })
 
 #define wait_for_atomic_us(COND, US) ({ \
-	int i, ret__ = -ETIMEDOUT;	\
-	for (i = 0; i < (US); i++) {	\
-		if ((COND)) {		\
-			ret__ = 0;	\
-			break;		\
-		}			\
-		udelay(1);		\
-	}				\
-	ret__;				\
+	unsigned long timeout__ = jiffies + usecs_to_jiffies(US);	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		cpu_relax();						\
+	}								\
+	ret__;								\
 })
 
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
@@ -380,7 +381,6 @@
 				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
 extern int intel_panel_setup_backlight(struct drm_device *dev);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1991a44..b9755f6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@
 		bus->dev_priv = dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;
-		ret = i2c_add_adapter(&bus->adapter);
-		if (ret)
-			goto err;
 
 		/* By default use a conservative clock rate */
 		bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@
 			bus->force_bit = true;
 
 		intel_gpio_setup(bus, port);
+
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
 	}
 
 	intel_i2c_reset(dev_priv->dev);
@@ -540,9 +541,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	if (dev_priv->gmbus == NULL)
-		return;
-
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 		i2c_del_adapter(&bus->adapter);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 10c7d39..3df4f5f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -213,7 +213,7 @@
 	return val;
 }
 
-u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
@@ -311,9 +311,6 @@
 	if (dev_priv->backlight_level == 0)
 		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-	dev_priv->backlight_enabled = true;
-	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
@@ -326,7 +323,7 @@
 		 * we don't track the backlight dpms state, hence check whether
 		 * we have to do anything first. */
 		if (tmp & BLM_PWM_ENABLE)
-			return;
+			goto set_level;
 
 		if (dev_priv->num_pipe == 3)
 			tmp &= ~BLM_PIPE_SELECT_IVB;
@@ -347,6 +344,14 @@
 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
 		}
 	}
+
+set_level:
+	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+	 * registers are set.
+	 */
+	dev_priv->backlight_enabled = true;
+	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 94aabca..58c07cd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3963,6 +3963,7 @@
 		DRM_ERROR("Force wake wait timed out\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(FORCEWAKE);
 
 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
 		DRM_ERROR("Force wake wait timed out\n");
@@ -3983,6 +3984,7 @@
 		DRM_ERROR("Force wake wait timed out\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+	POSTING_READ(FORCEWAKE_MT);
 
 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
 		DRM_ERROR("Force wake wait timed out\n");
@@ -4018,14 +4020,14 @@
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	/* The below doubles as a POSTING_READ */
+	POSTING_READ(FORCEWAKE);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
-	/* The below doubles as a POSTING_READ */
+	POSTING_READ(FORCEWAKE_MT);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a..e2a73b3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_TLB_INVALIDATE;
-	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-	/*
-	 * Ensure that any following seqno writes only happen when the render
-	 * cache is indeed flushed (but only if the caller actually wants that).
-	 */
-	if (flush_domains)
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
 		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* lower dword */
-	intel_ring_emit(ring, 0); /* uppwer dword */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -289,8 +294,6 @@
 	I915_WRITE_HEAD(ring, 0);
 	ring->write_tail(ring, 0);
 
-	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -316,6 +319,11 @@
 		}
 	}
 
+	/* Initialize the ring. This must happen _after_ we've cleared the ring
+	 * registers with the above sequence (the readback of the HEAD registers
+	 * also enforces ordering), otherwise the hw might lose the new ring
+	 * register values. */
+	I915_WRITE_START(ring, obj->gtt_offset);
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26a6a4d..d172e98 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -444,13 +444,16 @@
 	struct i2c_msg *msgs;
 	int i, ret = true;
 
+	/* Would be simpler to allocate both in one go? */
 	buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
 	if (!buf)
 		return false;
 
 	msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
-	if (!msgs)
+	if (!msgs) {
+		kfree(buf);
 		return false;
+	}
 
 	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a4d7c50..b69642d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -468,10 +468,11 @@
 {
 	unsigned int vcomax, vcomin, pllreffreq;
 	unsigned int delta, tmpdelta;
-	unsigned int testr, testn, testm, testo;
+	int testr, testn, testm, testo;
 	unsigned int p, m, n;
-	unsigned int computed;
+	unsigned int computed, vco;
 	int tmp;
+	const unsigned int m_div_val[] = { 1, 2, 4, 8 };
 
 	m = n = p = 0;
 	vcomax = 1488000;
@@ -490,12 +491,13 @@
 				if (delta == 0)
 					break;
 				for (testo = 5; testo < 33; testo++) {
-					computed = pllreffreq * (testn + 1) /
+					vco = pllreffreq * (testn + 1) /
 						(testr + 1);
-					if (computed < vcomin)
+					if (vco < vcomin)
 						continue;
-					if (computed > vcomax)
+					if (vco > vcomax)
 						continue;
+					computed = vco / (m_div_val[testm] * (testo + 1));
 					if (computed > clock)
 						tmpdelta = computed - clock;
 					else
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fc841e8..26ebffe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -211,11 +211,6 @@
 	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
 }
 
-static int nouveau_dsm_init(void)
-{
-	return 0;
-}
-
 static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
 	/* easy option one - intel vendor ID means Integrated */
@@ -232,7 +227,6 @@
 static struct vga_switcheroo_handler nouveau_dsm_handler = {
 	.switchto = nouveau_dsm_switchto,
 	.power_state = nouveau_dsm_power_state,
-	.init = nouveau_dsm_init,
 	.get_client_id = nouveau_dsm_get_client_id,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 77e5646..240cf96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -229,7 +229,7 @@
 			}
 			break;
 		case 6: /* NV50- DP AUX */
-			port->drive = entry[0];
+			port->drive = entry[0] & 0x0f;
 			port->sense = port->drive;
 			port->adapter.algo = &nouveau_dp_i2c_algo;
 			break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1cdfd6e..1866dbb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -731,7 +731,6 @@
 			case 0xa3:
 			case 0xa5:
 			case 0xa8:
-			case 0xaf:
 				nva3_copy_create(dev);
 				break;
 			}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
index cc82d79..c564c5e 100644
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -117,17 +117,22 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	unsigned long flags;
+	u32 save;
 
 	/* remove channel from playlist, will context switch if active */
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
 	nv50_fifo_playlist_update(dev);
 
+	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
 	/* tell any engines on this channel to unload their contexts */
 	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
 	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
 		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
 
+	nv_wr32(dev, 0x002520, save);
+
 	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
@@ -184,10 +189,13 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv84_fifo_priv *priv = nv_engine(dev, engine);
 	int i;
+	u32 save;
 
 	/* set playlist length to zero, fifo will unload context */
 	nv_wr32(dev, 0x0032ec, 0);
 
+	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
 	/* tell all connected engines to unload their contexts */
 	for (i = 0; i < priv->base.channels; i++) {
 		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
@@ -199,6 +207,7 @@
 		}
 	}
 
+	nv_wr32(dev, 0x002520, save);
 	nv_wr32(dev, 0x002140, 0);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 7c95c44..4e712b1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -557,7 +557,7 @@
 	nouveau_mem_exec(&exec, info->perflvl);
 
 	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003300);
+		nv_wr32(dev, 0x611200, 0x00003330);
 	else
 		nv_wr32(dev, 0x62c000, 0x03030300);
 }
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index d0d60e1..dac525b 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -790,7 +790,7 @@
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ch = EVO_CURS(nv_crtc->index);
 
-	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
 	evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index 1855ecbd..e98d144 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -294,6 +294,25 @@
 	printk(" on channel 0x%010llx\n", (u64)inst << 12);
 }
 
+static int
+nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+	struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid >= 0 && chid < priv->base.channels)) {
+		chan = dev_priv->channels.ptr[chid];
+		if (likely(chan))
+			ret = nouveau_finish_page_flip(chan, NULL);
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return ret;
+}
+
 static void
 nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 {
@@ -303,11 +322,21 @@
 	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000);
 	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
 
-	NV_INFO(dev, "PSUBFIFO %d:", unit);
-	nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
-	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
-		unit, chid, subc, mthd, data);
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nve0_fifo_page_flip(dev, chid))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (show) {
+		NV_INFO(dev, "PFIFO%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+			unit, chid, subc, mthd, data);
+	}
 
 	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9e6f76f..c6fcb5b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -259,7 +259,7 @@
 		/* adjust pm to dpms changes BEFORE enabling crtcs */
 		radeon_pm_compute_clocks(rdev);
 		/* disable crtc pair power gating before programming */
-		if (ASIC_IS_DCE6(rdev))
+		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
 			atombios_powergate_crtc(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -279,7 +279,7 @@
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		radeon_crtc->enabled = false;
 		/* power gating is per-pair */
-		if (ASIC_IS_DCE6(rdev)) {
+		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
 			struct drm_crtc *other_crtc;
 			struct radeon_crtc *other_radeon_crtc;
 			list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
@@ -1531,12 +1531,12 @@
 				 * crtc virtual pixel clock.
 				 */
 				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
-					if (ASIC_IS_DCE5(rdev))
-						return ATOM_DCPLL;
+					if (rdev->clock.dp_extclk)
+						return ATOM_PPLL_INVALID;
 					else if (ASIC_IS_DCE6(rdev))
 						return ATOM_PPLL0;
-					else if (rdev->clock.dp_extclk)
-						return ATOM_PPLL_INVALID;
+					else if (ASIC_IS_DCE5(rdev))
+						return ATOM_DCPLL;
 				}
 			}
 		}
@@ -1635,18 +1635,28 @@
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 
+	radeon_crtc->in_mode_set = true;
 	/* pick pll */
 	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
+	/* disable crtc pair power gating before programming */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
 	atombios_lock_crtc(crtc, ATOM_ENABLE);
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 	atombios_lock_crtc(crtc, ATOM_DISABLE);
+	radeon_crtc->in_mode_set = false;
 }
 
 static void atombios_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e585a3b..e93b80a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1229,24 +1229,8 @@
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-	save->vga_control[0] = RREG32(D1VGA_CONTROL);
-	save->vga_control[1] = RREG32(D2VGA_CONTROL);
 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
-	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	if (rdev->num_crtc >= 4) {
-		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
-		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
-		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
-		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	}
-	if (rdev->num_crtc >= 6) {
-		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
-		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
-		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
-		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	}
 
 	/* Stop all video */
 	WREG32(VGA_RENDER_CONTROL, 0);
@@ -1357,47 +1341,6 @@
 	/* Unlock host access */
 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
-	/* Restore video state */
-	WREG32(D1VGA_CONTROL, save->vga_control[0]);
-	WREG32(D2VGA_CONTROL, save->vga_control[1]);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
-		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
-		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-	}
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-	}
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
@@ -1986,10 +1929,18 @@
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.evergreen.tile_config |= 1 << 4;
 	else {
-		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-			rdev->config.evergreen.tile_config |= 1 << 4;
-		else
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
 			rdev->config.evergreen.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.evergreen.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.evergreen.tile_config |= 2 << 4;
+			break;
+		}
 	}
 	rdev->config.evergreen.tile_config |= 0 << 8;
 	rdev->config.evergreen.tile_config |=
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c1655412..e44a62a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -788,6 +788,13 @@
 	case V_030000_SQ_TEX_DIM_1D_ARRAY:
 	case V_030000_SQ_TEX_DIM_2D_ARRAY:
 		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_2D_MSAA:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		surf.nsamples = 1 << llevel;
+		llevel = 0;
+		depth = 1;
+		break;
 	case V_030000_SQ_TEX_DIM_3D:
 		break;
 	default:
@@ -961,13 +968,15 @@
 
 	if (track->db_dirty) {
 		/* Check stencil buffer */
-		if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
 			r = evergreen_cs_track_validate_stencil(p);
 			if (r)
 				return r;
 		}
 		/* Check depth buffer */
-		if (G_028800_Z_ENABLE(track->db_depth_control)) {
+		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+		    G_028800_Z_ENABLE(track->db_depth_control)) {
 			r = evergreen_cs_track_validate_depth(p);
 			if (r)
 				return r;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d3bd098..7934785 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1277,6 +1277,8 @@
 #define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
 #define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
 #define   C_028044_FORMAT                              0xFFFFFFFE
+#define	    V_028044_STENCIL_INVALID			0
+#define	    V_028044_STENCIL_8				1
 #define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
 #define DB_Z_READ_BASE					0x28048
 #define DB_STENCIL_READ_BASE				0x2804c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9945d86..853800e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -574,10 +574,18 @@
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.cayman.tile_config |= 1 << 4;
 	else {
-		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-			rdev->config.cayman.tile_config |= 1 << 4;
-		else
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
 			rdev->config.cayman.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.cayman.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.cayman.tile_config |= 2 << 4;
+			break;
+		}
 	}
 	rdev->config.cayman.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 637280f..d79c639 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3789,3 +3789,23 @@
 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 	}
 }
+
+/**
+ * r600_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ca87f7a..3dab49c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -764,8 +764,10 @@
 	}
 
 	/* Check depth buffer */
-	if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
-		G_028800_Z_ENABLE(track->db_depth_control))) {
+	if (track->db_dirty &&
+	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+	     G_028800_Z_ENABLE(track->db_depth_control))) {
 		r = r600_cs_track_validate_db(p);
 		if (r)
 			return r;
@@ -1557,13 +1559,14 @@
 					      u32 tiling_flags)
 {
 	struct r600_cs_track *track = p->track;
-	u32 nfaces, llevel, blevel, w0, h0, d0;
-	u32 word0, word1, l0_size, mipmap_size, word2, word3;
+	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
 	u32 height_align, pitch, pitch_align, depth_align;
-	u32 array, barray, larray;
+	u32 barray, larray;
 	u64 base_align;
 	struct array_mode_checker array_check;
 	u32 format;
+	bool is_array;
 
 	/* on legacy kernel we don't perform advanced check */
 	if (p->rdev == NULL)
@@ -1581,12 +1584,28 @@
 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
 	}
 	word1 = radeon_get_ib_value(p, idx + 1);
+	word2 = radeon_get_ib_value(p, idx + 2) << 8;
+	word3 = radeon_get_ib_value(p, idx + 3) << 8;
+	word4 = radeon_get_ib_value(p, idx + 4);
+	word5 = radeon_get_ib_value(p, idx + 5);
+	dim = G_038000_DIM(word0);
 	w0 = G_038000_TEX_WIDTH(word0) + 1;
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
 	d0 = G_038004_TEX_DEPTH(word1);
+	format = G_038004_DATA_FORMAT(word1);
+	blevel = G_038010_BASE_LEVEL(word4);
+	llevel = G_038014_LAST_LEVEL(word5);
+	/* pitch in texels */
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
 	nfaces = 1;
-	array = 0;
-	switch (G_038000_DIM(word0)) {
+	is_array = false;
+	switch (dim) {
 	case V_038000_SQ_TEX_DIM_1D:
 	case V_038000_SQ_TEX_DIM_2D:
 	case V_038000_SQ_TEX_DIM_3D:
@@ -1599,29 +1618,25 @@
 		break;
 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
-		array = 1;
+		is_array = true;
 		break;
-	case V_038000_SQ_TEX_DIM_2D_MSAA:
 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		is_array = true;
+		/* fall through */
+	case V_038000_SQ_TEX_DIM_2D_MSAA:
+		array_check.nsamples = 1 << llevel;
+		llevel = 0;
+		break;
 	default:
 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
 		return -EINVAL;
 	}
-	format = G_038004_DATA_FORMAT(word1);
 	if (!r600_fmt_is_valid_texture(format, p->family)) {
 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
 			 __func__, __LINE__, format);
 		return -EINVAL;
 	}
 
-	/* pitch in texels */
-	pitch = (G_038000_PITCH(word0) + 1) * 8;
-	array_check.array_mode = G_038000_TILE_MODE(word0);
-	array_check.group_size = track->group_size;
-	array_check.nbanks = track->nbanks;
-	array_check.npipes = track->npipes;
-	array_check.nsamples = 1;
-	array_check.blocksize = r600_fmt_get_blocksize(format);
 	if (r600_get_array_mode_alignment(&array_check,
 					  &pitch_align, &height_align, &depth_align, &base_align)) {
 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1647,20 +1662,13 @@
 		return -EINVAL;
 	}
 
-	word2 = radeon_get_ib_value(p, idx + 2) << 8;
-	word3 = radeon_get_ib_value(p, idx + 3) << 8;
-
-	word0 = radeon_get_ib_value(p, idx + 4);
-	word1 = radeon_get_ib_value(p, idx + 5);
-	blevel = G_038010_BASE_LEVEL(word0);
-	llevel = G_038014_LAST_LEVEL(word1);
 	if (blevel > llevel) {
 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
 			 blevel, llevel);
 	}
-	if (array == 1) {
-		barray = G_038014_BASE_ARRAY(word1);
-		larray = G_038014_LAST_ARRAY(word1);
+	if (is_array) {
+		barray = G_038014_BASE_ARRAY(word5);
+		larray = G_038014_LAST_ARRAY(word5);
 
 		nfaces = larray - barray + 1;
 	}
@@ -1677,7 +1685,6 @@
 		return -EINVAL;
 	}
 	/* using get ib will give us the offset into the mipmap bo */
-	word3 = radeon_get_ib_value(p, idx + 3) << 8;
 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4b116ae..fd328f4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,9 @@
 #define RLC_HB_WPTR                                       0x3f1c
 #define RLC_HB_WPTR_LSB_ADDR                              0x3f14
 #define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB				  0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB				  0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT			  0x3f40
 #define RLC_MC_CNTL                                       0x3f44
 #define RLC_UCODE_CNTL                                    0x3f48
 #define RLC_UCODE_ADDR                                    0x3f2c
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5431af2..9930419 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -300,6 +300,7 @@
 	uint64_t			soffset;
 	uint64_t			eoffset;
 	uint32_t			flags;
+	struct radeon_fence		*fence;
 	bool				valid;
 };
 
@@ -1533,6 +1534,7 @@
 	unsigned 		debugfs_count;
 	/* virtual memory */
 	struct radeon_vm_manager	vm_manager;
+	struct mutex			gpu_clock_mutex;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1733,11 +1735,11 @@
 #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
-#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
-#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
 
 /* Common functions */
 /* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f4af243..18c38d1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -255,13 +255,10 @@
  * rv515
  */
 struct rv515_mc_save {
-	u32 d1vga_control;
-	u32 d2vga_control;
 	u32 vga_render_control;
 	u32 vga_hdp_control;
-	u32 d1crtc_control;
-	u32 d2crtc_control;
 };
+
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -371,6 +368,7 @@
 			unsigned num_gpu_pages,
 			struct radeon_sa_bo *vb);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -389,11 +387,10 @@
  * evergreen
  */
 struct evergreen_mc_save {
-	u32 vga_control[6];
 	u32 vga_render_control;
 	u32 vga_hdp_control;
-	u32 crtc_control[6];
 };
+
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
@@ -472,5 +469,6 @@
 void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
 void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index b1e3820..f9c21f9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1263,6 +1263,8 @@
 union igp_info {
 	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
 };
 
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1390,27 +1392,50 @@
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
 	u16 data_offset, size;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+	union igp_info *igp_info;
 	u8 frev, crev;
 	u16 percentage = 0, rate = 0;
 
 	/* get any igp specific overrides */
 	if (atom_parse_data_header(mode_info->atom_context, index, &size,
 				   &frev, &crev, &data_offset)) {
-		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+		igp_info = (union igp_info *)
 			(mode_info->atom_context->bios + data_offset);
-		switch (id) {
-		case ASIC_INTERNAL_SS_ON_TMDS:
-			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
-			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
 			break;
-		case ASIC_INTERNAL_SS_ON_HDMI:
-			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
-			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
 			break;
-		case ASIC_INTERNAL_SS_ON_LVDS:
-			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
-			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
 			break;
 		}
 		if (percentage)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 576f4f6..f75247d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -719,6 +719,34 @@
 	return i2c;
 }
 
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+	u16 offset;
+	u8 id, blocks, clk, data;
+	int i;
+
+	i2c.valid = false;
+
+	offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+	if (offset) {
+		blocks = RBIOS8(offset + 2);
+		for (i = 0; i < blocks; i++) {
+			id = RBIOS8(offset + 3 + (i * 5) + 0);
+			if (id == 136) {
+				clk = RBIOS8(offset + 3 + (i * 5) + 3);
+				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
+				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+							    (1 << clk), (1 << data));
+				break;
+			}
+		}
+	}
+	return i2c;
+}
+
 void radeon_combios_i2c_init(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
@@ -755,30 +783,14 @@
 	} else if (rdev->family == CHIP_RS300 ||
 		   rdev->family == CHIP_RS400 ||
 		   rdev->family == CHIP_RS480) {
-		u16 offset;
-		u8 id, blocks, clk, data;
-		int i;
-
 		/* 0x68 */
 		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
-		offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
-		if (offset) {
-			blocks = RBIOS8(offset + 2);
-			for (i = 0; i < blocks; i++) {
-				id = RBIOS8(offset + 3 + (i * 5) + 0);
-				if (id == 136) {
-					clk = RBIOS8(offset + 3 + (i * 5) + 3);
-					data = RBIOS8(offset + 3 + (i * 5) + 4);
-					/* gpiopad */
-					i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
-								    (1 << clk), (1 << data));
-					rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
-					break;
-				}
-			}
-		}
+		/* gpiopad */
+		i2c = radeon_combios_get_i2c_info_from_table(rdev);
+		if (i2c.valid)
+			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
 	} else if ((rdev->family == CHIP_R200) ||
 		   (rdev->family >= CHIP_R300)) {
 		/* 0x68 */
@@ -2321,7 +2333,10 @@
 			connector = (tmp >> 12) & 0xf;
 
 			ddc_type = (tmp >> 8) & 0xf;
-			ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+			if (ddc_type == 5)
+				ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+			else
+				ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
 
 			switch (connector) {
 			case CONNECTOR_PROPRIETARY_LEGACY:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8a4c49e..b4a0db24 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -278,6 +278,30 @@
 	return 0;
 }
 
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+				  struct radeon_fence *fence)
+{
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_list *lobj;
+
+	if (parser->chunk_ib_idx == -1) {
+		return;
+	}
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+		return;
+	}
+
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		struct radeon_bo_va *bo_va;
+		struct radeon_bo *rbo = lobj->bo;
+
+		bo_va = radeon_bo_va(rbo, vm);
+		radeon_fence_unref(&bo_va->fence);
+		bo_va->fence = radeon_fence_ref(fence);
+	}
+}
+
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -290,11 +314,14 @@
 {
 	unsigned i;
 
-	if (!error)
+	if (!error) {
+		/* fence all bo va before ttm_eu_fence_buffer_objects() so the bos are still reserved */
+		radeon_bo_vm_fence_va(parser, parser->ib.fence);
 		ttm_eu_fence_buffer_objects(&parser->validated,
 					    parser->ib.fence);
-	else
+	} else {
 		ttm_eu_backoff_reservation(&parser->validated);
+	}
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@
 
 	if (parser->chunk_ib_idx == -1)
 		return 0;
-
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
 
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 711e95a..8794744 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -67,7 +67,8 @@
 
 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@@ -94,7 +95,8 @@
 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
-		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 742af82..d2e2438 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1009,6 +1009,7 @@
 	atomic_set(&rdev->ih.lock, 0);
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
+	mutex_init(&rdev->gpu_clock_mutex);
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcea6f0..d7269f4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -59,9 +59,12 @@
  *   2.15.0 - add max_pipes query
  *   2.16.0 - fix evergreen 2D tiled surface calculation
  *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ *   2.18.0 - r600-eg: allow "invalid" DB formats
+ *   2.19.0 - r600-eg: MSAA textures
+ *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	17
+#define KMS_DRIVER_MINOR	20
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b372005..bb3b7fe 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -814,7 +814,7 @@
 		return -EINVAL;
 	}
 
-	if (bo_va->valid)
+	if (bo_va->valid && mem)
 		return 0;
 
 	ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -859,11 +859,27 @@
 		     struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va;
+	int r;
 
 	bo_va = radeon_bo_va(bo, vm);
 	if (bo_va == NULL)
 		return 0;
 
+	/* wait for va use to end */
+	while (bo_va->fence) {
+		r = radeon_fence_wait(bo_va->fence, false);
+		if (r) {
+			DRM_ERROR("error while waiting for fence: %d\n", r);
+		}
+		if (r == -EDEADLK) {
+			r = radeon_gpu_reset(rdev);
+			if (!r)
+				continue;
+		}
+		break;
+	}
+	radeon_fence_unref(&bo_va->fence);
+
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@@ -934,7 +950,7 @@
 }
 
 /**
- * radeon_vm_init - tear down a vm instance
+ * radeon_vm_fini - tear down a vm instance
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
@@ -952,12 +968,15 @@
 	radeon_vm_unbind_locked(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo */
+	/* remove all bo; at this point none are busy any more because unbind
+	 * waited for the last vm fence to signal
+	 */
 	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
 		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
+		radeon_fence_unref(&bo_va->fence);
 		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
@@ -969,6 +988,7 @@
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
+			radeon_fence_unref(&bo_va->fence);
 			radeon_bo_unreserve(bo_va->bo);
 			kfree(bo_va);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 84d0452..1b57b00 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,25 +134,16 @@
 	struct radeon_device *rdev = rbo->rdev;
 	struct radeon_fpriv *fpriv = file_priv->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_va *bo_va, *tmp;
 
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
 
 	if (radeon_bo_reserve(rbo, false)) {
+		dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo\n");
 		return;
 	}
-	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			/* remove from this vm address space */
-			mutex_lock(&vm->mutex);
-			list_del(&bo_va->vm_list);
-			mutex_unlock(&vm->mutex);
-			list_del(&bo_va->bo_list);
-			kfree(bo_va);
-		}
-	}
+	radeon_vm_bo_rmv(rdev, vm, rbo);
 	radeon_bo_unreserve(rbo);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 1d73f16..414b4ac 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -29,6 +29,7 @@
 #include "drm_sarea.h"
 #include "radeon.h"
 #include "radeon_drm.h"
+#include "radeon_asic.h"
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
@@ -167,17 +168,39 @@
 int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_radeon_info *info;
+	struct drm_radeon_info *info = data;
 	struct radeon_mode_info *minfo = &rdev->mode_info;
-	uint32_t *value_ptr;
-	uint32_t value;
+	uint32_t value, *value_ptr;
+	uint64_t value64, *value_ptr64;
 	struct drm_crtc *crtc;
 	int i, found;
 
-	info = data;
+	/* TIMESTAMP is a 64-bit value, needs special handling. */
+	if (info->request == RADEON_INFO_TIMESTAMP) {
+		if (rdev->family >= CHIP_R600) {
+			value_ptr64 = (uint64_t*)((unsigned long)info->value);
+			if (rdev->family >= CHIP_TAHITI) {
+				value64 = si_get_gpu_clock(rdev);
+			} else {
+				value64 = r600_get_gpu_clock(rdev);
+			}
+
+			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+				return -EFAULT;
+			}
+			return 0;
+		} else {
+			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+			return -EINVAL;
+		}
+	}
+
 	value_ptr = (uint32_t *)((unsigned long)info->value);
-	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 		return -EFAULT;
+	}
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
@@ -337,7 +360,7 @@
 		return -EINVAL;
 	}
 	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
-		DRM_ERROR("copy_to_user\n");
+		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
 		return -EFAULT;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index d5fd615..94b4a1c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1025,9 +1025,11 @@
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *crtci;
 
+	radeon_crtc->in_mode_set = true;
 	/*
 	* The hardware wedges sometimes if you reconfigure one CRTC
 	* whilst another is running (see fdo bug #24611).
@@ -1038,6 +1040,7 @@
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *crtci;
 
@@ -1048,6 +1051,7 @@
 		if (crtci->enabled)
 			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
 	}
+	radeon_crtc->in_mode_set = false;
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f380d59..d569789 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -275,6 +275,7 @@
 	u16 lut_r[256], lut_g[256], lut_b[256];
 	bool enabled;
 	bool can_tile;
+	bool in_mode_set;
 	uint32_t crtc_offset;
 	struct drm_gem_object *cursor_bo;
 	uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f1a4c8..1cb014b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@
 
 	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
 		/* remove from all vm address space */
-		mutex_lock(&bo_va->vm->mutex);
-		list_del(&bo_va->vm_list);
-		mutex_unlock(&bo_va->vm->mutex);
-		list_del(&bo_va->bo_list);
-		kfree(bo_va);
+		radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index a12fbcc..aa8ef49 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-	save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
-	save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
 	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
-	save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
-	save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
 	/* Stop all video */
 	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@
 	/* Unlock host access */
 	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
-	/* Restore video state */
-	WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
-	WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-	WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
-	WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
 	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c053f81..0139e22 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1639,11 +1639,19 @@
 		/* XXX what about 12? */
 		rdev->config.si.tile_config |= (3 << 0);
 		break;
-	}
-	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-		rdev->config.si.tile_config |= 1 << 4;
-	else
+	}
+	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+	case 0: /* four banks */
 		rdev->config.si.tile_config |= 0 << 4;
+		break;
+	case 1: /* eight banks */
+		rdev->config.si.tile_config |= 1 << 4;
+		break;
+	case 2: /* sixteen banks */
+	default:
+		rdev->config.si.tile_config |= 2 << 4;
+		break;
+	}
 	rdev->config.si.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.si.tile_config |=
@@ -3960,3 +3968,22 @@
 	rdev->bios = NULL;
 }
 
+/**
+ * si_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7869089..ef4815c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -698,6 +698,9 @@
 #define RLC_UCODE_ADDR                                    0xC32C
 #define RLC_UCODE_DATA                                    0xC330
 
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC340
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 0b5e096..56e0bf3 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,6 +1,7 @@
 config DRM_UDL
 	tristate "DisplayLink"
 	depends on DRM && EXPERIMENTAL
+	depends on USB_ARCH_HAS_HCD
 	select DRM_USB
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 7bd65bd..291ecc1 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -308,7 +308,7 @@
 	/* need to attach */
 	attach = dma_buf_attach(dma_buf, dev->dev);
 	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
+		return ERR_CAST(attach);
 
 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sg)) {
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 5b3c7d1..e25cf31 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -70,27 +70,12 @@
 	.clients = LIST_HEAD_INIT(vgasr_priv.clients),
 };
 
-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+static bool vga_switcheroo_ready(void)
 {
-	mutex_lock(&vgasr_mutex);
-	if (vgasr_priv.handler) {
-		mutex_unlock(&vgasr_mutex);
-		return -EINVAL;
-	}
-
-	vgasr_priv.handler = handler;
-	mutex_unlock(&vgasr_mutex);
-	return 0;
+	/* we're ready if we get two clients + handler */
+	return !vgasr_priv.active &&
+	       vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
 }
-EXPORT_SYMBOL(vga_switcheroo_register_handler);
-
-void vga_switcheroo_unregister_handler(void)
-{
-	mutex_lock(&vgasr_mutex);
-	vgasr_priv.handler = NULL;
-	mutex_unlock(&vgasr_mutex);
-}
-EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
 
 static void vga_switcheroo_enable(void)
 {
@@ -98,7 +83,8 @@
 	struct vga_switcheroo_client *client;
 
 	/* call the handler to init */
-	vgasr_priv.handler->init();
+	if (vgasr_priv.handler->init)
+		vgasr_priv.handler->init();
 
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (client->id != -1)
@@ -113,6 +99,37 @@
 	vgasr_priv.active = true;
 }
 
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+{
+	mutex_lock(&vgasr_mutex);
+	if (vgasr_priv.handler) {
+		mutex_unlock(&vgasr_mutex);
+		return -EINVAL;
+	}
+
+	vgasr_priv.handler = handler;
+	if (vga_switcheroo_ready()) {
+		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		vga_switcheroo_enable();
+	}
+	mutex_unlock(&vgasr_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_handler);
+
+void vga_switcheroo_unregister_handler(void)
+{
+	mutex_lock(&vgasr_mutex);
+	vgasr_priv.handler = NULL;
+	if (vgasr_priv.active) {
+		pr_info("vga_switcheroo: disabled\n");
+		vga_switcheroo_debugfs_fini(&vgasr_priv);
+		vgasr_priv.active = false;
+	}
+	mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+
 static int register_client(struct pci_dev *pdev,
 			   const struct vga_switcheroo_client_ops *ops,
 			   int id, bool active)
@@ -134,9 +151,7 @@
 	if (client_is_vga(client))
 		vgasr_priv.registered_clients++;
 
-	/* if we get two clients + handler */
-	if (!vgasr_priv.active &&
-	    vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
+	if (vga_switcheroo_ready()) {
 		printk(KERN_INFO "vga_switcheroo: enabled\n");
 		vga_switcheroo_enable();
 	}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index faa16f8..0fa356f 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -196,7 +196,7 @@
 	int tjmax;
 };
 
-static struct tjmax __cpuinitconst tjmax_table[] = {
+static const struct tjmax __cpuinitconst tjmax_table[] = {
 	{ "CPU D410", 100000 },
 	{ "CPU D425", 100000 },
 	{ "CPU D510", 100000 },
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index ab48252..5b1a6a6 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1206,7 +1206,7 @@
 	int err = -ENODEV;
 	u16 val;
 
-	static const __initdata char *names[] = {
+	static __initconst char *const names[] = {
 		"W83627HF",
 		"W83627THF",
 		"W83697HF",
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f559088..e872617 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -606,8 +606,9 @@
 	intel_idle_cpuidle_driver_init();
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
+		struct cpuidle_driver *drv = cpuidle_get_driver();
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
-			cpuidle_get_driver()->name);
+			drv ? drv->name : "none");
 		return retval;
 	}
 
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 59fbb3ae..e35bb8f 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -129,7 +129,7 @@
 {
 	struct adf4350_platform_data *pdata = st->pdata;
 	u64 tmp;
-	u32 div_gcd, prescaler;
+	u32 div_gcd, prescaler, chspc;
 	u16 mdiv, r_cnt = 0;
 	u8 band_sel_div;
 
@@ -158,14 +158,20 @@
 	if (pdata->ref_div_factor)
 		r_cnt = pdata->ref_div_factor - 1;
 
-	do  {
-		r_cnt = adf4350_tune_r_cnt(st, r_cnt);
+	chspc = st->chspc;
 
-		st->r1_mod = st->fpfd / st->chspc;
-		while (st->r1_mod > ADF4350_MAX_MODULUS) {
-			r_cnt = adf4350_tune_r_cnt(st, r_cnt);
-			st->r1_mod = st->fpfd / st->chspc;
-		}
+	do  {
+		do {
+			do {
+				r_cnt = adf4350_tune_r_cnt(st, r_cnt);
+				st->r1_mod = st->fpfd / chspc;
+				if (r_cnt > ADF4350_MAX_R_CNT) {
+					/* try higher spacing values */
+					chspc++;
+					r_cnt = 0;
+				}
+			} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
+		} while (r_cnt == 0);
 
 		tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
 		do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
@@ -194,7 +200,7 @@
 	st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) |
 				 ADF4350_REG0_FRACT(st->r0_fract);
 
-	st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) |
+	st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) |
 				 ADF4350_REG1_MOD(st->r1_mod) |
 				 prescaler;
 
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 1cbb449..9a99f43 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -271,9 +271,10 @@
 	const unsigned long *scan_mask)
 {
 	struct adjd_s311_data *data = iio_priv(indio_dev);
-	data->buffer = krealloc(data->buffer, indio_dev->scan_bytes,
-				GFP_KERNEL);
-	if (!data->buffer)
+
+	kfree(data->buffer);
+	data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+	if (data->buffer == NULL)
 		return -ENOMEM;
 
 	return 0;
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index c3e7bac..e45712a9 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -404,7 +404,7 @@
 	return ret;
 }
 
-static int show_thresh_either_en(struct device *dev,
+static ssize_t show_thresh_either_en(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
 {
@@ -424,7 +424,7 @@
 	return scnprintf(buf, PAGE_SIZE, "%u\n", enable);
 }
 
-static int store_thresh_either_en(struct device *dev,
+static ssize_t store_thresh_either_en(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t len)
 {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6bf8504..055ed59 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -267,6 +267,7 @@
 	if (!uevent)
 		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
 
+	mutex_lock(&ctx->file->mut);
 	uevent->cm_id = cm_id;
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
@@ -277,7 +278,6 @@
 		ucma_copy_conn_event(&uevent->resp.param.conn,
 				     &event->param.conn);
 
-	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 8c81992..e4a7315 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -439,7 +439,7 @@
 
 /*
  * Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
 int __devinit c2_rnic_init(struct c2_dev *c2dev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 77b6b18..aaf88ef9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1680,7 +1680,7 @@
  * T3A does 3 things when a TERM is received:
  * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
  * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
  *
  * For (1), we save the message in the qp for later consumer consumption.
  * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c27141f..9c2ae7e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -125,6 +125,7 @@
 {
 	struct ib_ah *new_ah;
 	struct ib_ah_attr ah_attr;
+	unsigned long flags;
 
 	if (!dev->send_agent[port_num - 1][0])
 		return;
@@ -139,11 +140,11 @@
 	if (IS_ERR(new_ah))
 		return;
 
-	spin_lock(&dev->sm_lock);
+	spin_lock_irqsave(&dev->sm_lock, flags);
 	if (dev->sm_ah[port_num - 1])
 		ib_destroy_ah(dev->sm_ah[port_num - 1]);
 	dev->sm_ah[port_num - 1] = new_ah;
-	spin_unlock(&dev->sm_lock);
+	spin_unlock_irqrestore(&dev->sm_lock, flags);
 }
 
 /*
@@ -197,13 +198,15 @@
 static void node_desc_override(struct ib_device *dev,
 			       struct ib_mad *mad)
 {
+	unsigned long flags;
+
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
-		spin_lock(&to_mdev(dev)->sm_lock);
+		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
 		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
-		spin_unlock(&to_mdev(dev)->sm_lock);
+		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
 	}
 }
 
@@ -213,6 +216,7 @@
 	struct ib_mad_send_buf *send_buf;
 	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
 	int ret;
+	unsigned long flags;
 
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@
 		 * wrong following the IB spec strictly, but we know
 		 * it's OK for our devices).
 		 */
-		spin_lock(&dev->sm_lock);
+		spin_lock_irqsave(&dev->sm_lock, flags);
 		memcpy(send_buf->mad, mad, sizeof *mad);
 		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
 			ret = ib_post_send_mad(send_buf, NULL);
 		else
 			ret = -EINVAL;
-		spin_unlock(&dev->sm_lock);
+		spin_unlock_irqrestore(&dev->sm_lock, flags);
 
 		if (ret)
 			ib_free_send_mad(send_buf);
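
The mad.c hunks above switch every sm_lock user to the irqsave variants so the lock is safe to take in contexts where interrupts may already be disabled (the main.c hunk that follows does the same for the node_desc update). As a rough standalone sketch of the idiom, here is a hypothetical demo module, not part of this patch, that takes a spinlock with the saved-flags pattern:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_value;

static void demo_update(int v)
{
	unsigned long flags;

	/* Save the local interrupt state, disable interrupts, take the lock. */
	spin_lock_irqsave(&demo_lock, flags);
	demo_value = v;
	spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
	demo_update(1);
	pr_info("irqsave demo: value=%d\n", demo_value);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
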
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fe2088c..cc05579 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -423,6 +423,7 @@
 				 struct ib_device_modify *props)
 {
 	struct mlx4_cmd_mailbox *mailbox;
+	unsigned long flags;
 
 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 		return -EOPNOTSUPP;
@@ -430,9 +431,9 @@
 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
 		return 0;
 
-	spin_lock(&to_mdev(ibdev)->sm_lock);
+	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
 	memcpy(ibdev->node_desc, props->node_desc, 64);
-	spin_unlock(&to_mdev(ibdev)->sm_lock);
+	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
 	/*
 	 * If possible, pass node desc to FW, so it can generate
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a6d8ea0..f585edd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1407,6 +1407,7 @@
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
 	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct net_device *ndev;
 	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
@@ -1483,7 +1484,10 @@
 
 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
 		/* FIXME: cache smac value? */
-		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+		if (!ndev)
+			return -ENODEV;
+		smac = ndev->dev_addr;
 		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5a04452..c4e0131 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -161,7 +161,7 @@
 	ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
 	struct net_device *netdev, *tmp;
@@ -202,14 +202,13 @@
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 				  unsigned long event, void *ptr)
 {
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-	struct net_device *event_netdev = ifa->idev->dev;
-	struct net_device *netdev = NULL;
+	struct net_device *netdev = ifa->idev->dev;
 	struct ib_event gid_event;
 	struct ocrdma_dev *dev;
 	bool found = false;
@@ -217,11 +216,12 @@
 	bool is_vlan = false;
 	u16 vid = 0;
 
-	netdev = vlan_dev_real_dev(event_netdev);
-	if (netdev != event_netdev) {
-		is_vlan = true;
-		vid = vlan_dev_vlan_id(event_netdev);
+	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
+	if (is_vlan) {
+		vid = vlan_dev_vlan_id(netdev);
+		netdev = vlan_dev_real_dev(netdev);
 	}
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
 		if (dev->nic_info.netdev == netdev) {
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 0d7280a..3f6b21e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6346,8 +6346,10 @@
 			dd->piobcnt4k * dd->align4k;
 		dd->piovl15base	= ioremap_nocache(vl15off,
 						  NUM_VL15_BUFS * dd->align4k);
-		if (!dd->piovl15base)
+		if (!dd->piovl15base) {
+			ret = -ENOMEM;
 			goto bail;
+		}
 	}
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index a322d51..50a8a0d 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -372,7 +372,7 @@
 		/* Read CTRL reg for each channel to check TRIMDONE */
 		if (baduns & (1 << chn)) {
 			qib_dev_err(dd,
-				"Reseting TRIMDONE on chn %d (%s)\n",
+				"Resetting TRIMDONE on chn %d (%s)\n",
 				chn, where);
 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0x10, 0x10);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 95ecf4e..24683fd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1271,12 +1271,15 @@
 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+	unsigned long flags;
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
 		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 97920b7..3e2085a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1052,7 +1052,7 @@
 	for (n = rcu_dereference_protected(*np,
 					    lockdep_is_held(&ntbl->rwlock));
 	     n != NULL;
-	     n = rcu_dereference_protected(neigh->hnext,
+	     n = rcu_dereference_protected(*np,
 					lockdep_is_held(&ntbl->rwlock))) {
 		if (n == neigh) {
 			/* found */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index bcbf22e..1b5b0c7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -586,24 +586,62 @@
 			scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target,
-			   struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ *         ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+				       struct srp_request *req,
+				       struct scsi_cmnd *scmnd)
 {
 	unsigned long flags;
 
-	srp_unmap_data(req->scmnd, target, req);
+	spin_lock_irqsave(&target->lock, flags);
+	if (!scmnd) {
+		scmnd = req->scmnd;
+		req->scmnd = NULL;
+	} else if (req->scmnd == scmnd) {
+		req->scmnd = NULL;
+	} else {
+		scmnd = NULL;
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
+	return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+			 struct srp_request *req, struct scsi_cmnd *scmnd,
+			 s32 req_lim_delta)
+{
+	unsigned long flags;
+
+	srp_unmap_data(scmnd, target, req);
+
 	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
-	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
 	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
-	req->scmnd->result = DID_RESET << 16;
-	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req, 0);
+	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+	if (scmnd) {
+		scmnd->result = DID_RESET << 16;
+		scmnd->scsi_done(scmnd);
+		srp_free_req(target, req, scmnd, 0);
+	}
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
@@ -1073,11 +1111,18 @@
 		complete(&target->tsk_mgmt_done);
 	} else {
 		req = &target->req_ring[rsp->tag];
-		scmnd = req->scmnd;
-		if (!scmnd)
+		scmnd = srp_claim_req(target, req, NULL);
+		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
 				     (unsigned long long) rsp->tag);
+
+			spin_lock_irqsave(&target->lock, flags);
+			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+			spin_unlock_irqrestore(&target->lock, flags);
+
+			return;
+		}
 		scmnd->result = rsp->status;
 
 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1092,7 +1137,9 @@
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		srp_free_req(target, req, scmnd,
+			     be32_to_cpu(rsp->req_lim_delta));
+
 		scmnd->host_scribble = NULL;
 		scmnd->scsi_done(scmnd);
 	}
@@ -1631,25 +1678,17 @@
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
-	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (!req || target->qp_in_error)
+	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-			      SRP_TSK_ABORT_TASK))
-		return FAILED;
+	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			  SRP_TSK_ABORT_TASK);
+	srp_free_req(target, req, scmnd, 0);
+	scmnd->result = DID_ABORT << 16;
 
-	if (req->scmnd) {
-		if (!target->tsk_mgmt_status) {
-			srp_remove_req(target, req, 0);
-			scmnd->result = DID_ABORT << 16;
-		} else
-			ret = FAILED;
-	}
-
-	return ret;
+	return SUCCESS;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
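
The srp_claim_req()/srp_free_req() split above makes ownership of req->scmnd explicit: whichever path (normal response, abort or reset) claims the command first under target->lock gets to complete and free it, and everyone else backs off. A minimal userspace sketch of that compare-and-claim step, using a pthread mutex in place of the spinlock and invented cmd/request types, might look like this:

#include <pthread.h>
#include <stdio.h>

struct cmd { int tag; };

struct request {
	pthread_mutex_t lock;
	struct cmd *cmd;	/* owned by whoever successfully claims it */
};

/*
 * Claim ownership of req->cmd.  If expected is NULL, take whatever is
 * there; otherwise only take it if it is still the command we expect.
 * Returns the claimed command, or NULL if someone else got there first.
 */
static struct cmd *claim_cmd(struct request *req, struct cmd *expected)
{
	struct cmd *cmd = NULL;

	pthread_mutex_lock(&req->lock);
	if (!expected || req->cmd == expected) {
		cmd = req->cmd;
		req->cmd = NULL;
	}
	pthread_mutex_unlock(&req->lock);
	return cmd;
}

int main(void)
{
	struct cmd c = { .tag = 1 };
	struct request req = { .lock = PTHREAD_MUTEX_INITIALIZER, .cmd = &c };

	struct cmd *winner = claim_cmd(&req, &c);	/* succeeds */
	struct cmd *loser  = claim_cmd(&req, &c);	/* already claimed */

	printf("winner=%p loser=%p\n", (void *)winner, (void *)loser);
	return 0;
}

Losing the race simply returns NULL, which is why the rewritten srp_abort() above can bail out with FAILED as soon as the claim fails.
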
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7a0ce8d..9e1449f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1469,7 +1469,7 @@
  *
  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
  * the data that has been transferred via IB RDMA had to be postponed until the
- * check_stop_free() callback.  None of this is nessecary anymore and needs to
+ * check_stop_free() callback.  None of this is necessary anymore and needs to
  * be cleaned up.
  */
 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 503c709..908407e 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -48,7 +48,7 @@
 	struct input_dev *input;
 	struct work_struct work;
 	struct mutex mutex;
-	int irq, irq_active_high;
+	int irq_gpio, irq, irq_active_high;
 };
 
 #define EETI_TS_BITDEPTH	(11)
@@ -62,7 +62,7 @@
 
 static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
 {
-	return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
+	return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
 }
 
 static void eeti_ts_read(struct work_struct *work)
@@ -157,7 +157,7 @@
 static int __devinit eeti_ts_probe(struct i2c_client *client,
 				   const struct i2c_device_id *idp)
 {
-	struct eeti_ts_platform_data *pdata;
+	struct eeti_ts_platform_data *pdata = client->dev.platform_data;
 	struct eeti_ts_priv *priv;
 	struct input_dev *input;
 	unsigned int irq_flags;
@@ -199,9 +199,12 @@
 
 	priv->client = client;
 	priv->input = input;
-	priv->irq = client->irq;
+	priv->irq_gpio = pdata->irq_gpio;
+	priv->irq = gpio_to_irq(pdata->irq_gpio);
 
-	pdata = client->dev.platform_data;
+	err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
+	if (err < 0)
+		goto err1;
 
 	if (pdata)
 		priv->irq_active_high = pdata->irq_active_high;
@@ -215,13 +218,13 @@
 
 	err = input_register_device(input);
 	if (err)
-		goto err1;
+		goto err2;
 
 	err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
 			  client->name, priv);
 	if (err) {
 		dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
-		goto err2;
+		goto err3;
 	}
 
 	/*
@@ -233,9 +236,11 @@
 	device_init_wakeup(&client->dev, 0);
 	return 0;
 
-err2:
+err3:
 	input_unregister_device(input);
 	input = NULL; /* so we dont try to free it below */
+err2:
+	gpio_free(pdata->irq_gpio);
 err1:
 	input_free_device(input);
 	kfree(priv);
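
The eeti_ts changes above add a gpio_request_one() step to probe and renumber the error labels so every failure still unwinds exactly the resources acquired before it, in reverse order. A small standalone sketch of that goto-ladder idiom, with made-up step/undo helpers, reads:

#include <stdio.h>

/* Stand-ins for resource acquisition steps that can fail. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the last step fails */

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;

	err = step_b();
	if (err)
		goto err_b;

	err = step_c();
	if (err)
		goto err_c;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
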
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdf..b64502d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@
 	} else
 		dma_pdev = pci_dev_get(pdev);
 
+	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
 
+	/*
+	 * If it's a multifunction device that does not support our
+	 * required ACS flags, add to the same group as function 0.
+	 */
 	if (dma_pdev->multifunction &&
 	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
 		swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@
 					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
 					  0)));
 
+	/*
+	 * Devices on the root bus go through the iommu.  If that's not us,
+	 * find the next upstream device and test ACS up to the root bus.
+	 * Finding the next device may require skipping virtual buses.
+	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		if (pci_acs_path_enabled(dma_pdev->bus->self,
-					 NULL, REQ_ACS_FLAGS))
+		struct pci_bus *bus = dma_pdev->bus;
+
+		while (!bus->self) {
+			if (!pci_is_root_bus(bus))
+				bus = bus->parent;
+			else
+				goto root_bus;
+		}
+
+		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
 
-		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
+root_bus:
 	group = iommu_group_get(&dma_pdev->dev);
 	pci_dev_put(dma_pdev);
 	if (!group) {
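
The new inner loop above (duplicated in the intel-iommu.c hunk later in this series) walks upstream from a device toward the PCI root, skipping virtual buses that have no bridge device of their own, and stops early once the remaining path is ACS protected. A toy standalone model of that walk, with invented bus/dev structures and an acs_ok flag standing in for pci_acs_path_enabled(), could be:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct bus;

struct dev {
	struct bus *bus;	/* bus this device sits on */
	bool acs_ok;		/* path from here up is ACS-protected */
	const char *name;
};

struct bus {
	struct bus *parent;	/* NULL for the root bus */
	struct dev *self;	/* bridge device; NULL for virtual buses */
};

static struct dev *walk_upstream(struct dev *d)
{
	while (d->bus->parent) {		/* not on the root bus yet */
		struct bus *bus = d->bus;

		/* Skip virtual buses that have no bridge device. */
		while (!bus->self) {
			if (bus->parent)
				bus = bus->parent;
			else
				return d;	/* reached the root bus */
		}

		if (bus->self->acs_ok)		/* ACS protects the rest of the path */
			break;

		d = bus->self;			/* move one level upstream */
	}
	return d;
}

int main(void)
{
	struct bus root = { .parent = NULL, .self = NULL };
	struct dev bridge = { .bus = &root, .acs_ok = false, .name = "bridge" };
	struct bus secondary = { .parent = &root, .self = &bridge };
	struct bus virt = { .parent = &secondary, .self = NULL };	/* virtual bus */
	struct dev leaf = { .bus = &virt, .acs_ok = false, .name = "leaf" };

	printf("grouped with: %s\n", walk_upstream(&leaf)->name);
	return 0;
}
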
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f1..18a89b7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1111,7 +1111,7 @@
 
 		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
 			pr_info("AMD-Vi:  Extended features: ");
-			for (i = 0; ARRAY_SIZE(feat_str); ++i) {
+			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
 				if (iommu_feature(iommu, (1ULL << i)))
 					pr_cont(" %s", feat_str[i]);
 			}
@@ -1131,9 +1131,6 @@
 			break;
 	}
 
-	/* Make sure ACS will be enabled */
-	pci_request_acs();
-
 	ret = amd_iommu_init_devices();
 
 	print_iommu_info();
@@ -1652,6 +1649,9 @@
 
 	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
 
+	/* Make sure ACS will be enabled during PCI probe */
+	pci_request_acs();
+
 	return true;
 }
 
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff..80bad32 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@
 	spin_lock_init(&priv->pgtablelock);
 	INIT_LIST_HEAD(&priv->clients);
 
-	dom->geometry.aperture_start = 0;
-	dom->geometry.aperture_end   = ~0UL;
-	dom->geometry.force_aperture = true;
+	domain->geometry.aperture_start = 0;
+	domain->geometry.aperture_end   = ~0UL;
+	domain->geometry.force_aperture = true;
 
 	domain->priv = priv;
 	return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b53..2297ec1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@
 	if (!drhd) {
 		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
 			pci_name(pdev));
+		free_domain_mem(domain);
 		return NULL;
 	}
 	iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@
 	} else
 		dma_pdev = pci_dev_get(pdev);
 
+	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
 
+	/*
+	 * If it's a multifunction device that does not support our
+	 * required ACS flags, add to the same group as function 0.
+	 */
 	if (dma_pdev->multifunction &&
 	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
 		swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@
 					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
 					  0)));
 
+	/*
+	 * Devices on the root bus go through the iommu.  If that's not us,
+	 * find the next upstream device and test ACS up to the root bus.
+	 * Finding the next device may require skipping virtual buses.
+	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		if (pci_acs_path_enabled(dma_pdev->bus->self,
-					 NULL, REQ_ACS_FLAGS))
+		struct pci_bus *bus = dma_pdev->bus;
+
+		while (!bus->self) {
+			if (!pci_is_root_bus(bus))
+				bus = bus->parent;
+			else
+				goto root_bus;
+		}
+
+		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
 
-		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
+root_bus:
 	group = iommu_group_get(&dma_pdev->dev);
 	pci_dev_put(dma_pdev);
 	if (!group) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e0b18f3..af8904d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -736,6 +736,7 @@
 {
 	struct dmar_drhd_unit *drhd;
 	int ir_supported = 0;
+	int ioapic_idx;
 
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
@@ -748,13 +749,20 @@
 		}
 	}
 
-	if (ir_supported && ir_ioapic_num != nr_ioapics) {
-		printk(KERN_WARNING
-		       "Not all IO-APIC's listed under remapping hardware\n");
-		return -1;
+	if (!ir_supported)
+		return 0;
+
+	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
+		int ioapic_id = mpc_ioapic_id(ioapic_idx);
+		if (!map_ioapic_to_ir(ioapic_id)) {
+			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
+			       "interrupt remapping will be disabled\n",
+			       ioapic_id);
+			return -1;
+		}
 	}
 
-	return ir_supported;
+	return 1;
 }
 
 int __init ir_dev_scope_init(void)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325a..2a4bb36 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@
 			goto out;
 		}
 	}
-	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
+	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
 out:
 	spin_unlock(&as->client_lock);
 }
 
 static int smmu_iommu_domain_init(struct iommu_domain *domain)
 {
-	int i, err = -ENODEV;
+	int i, err = -EAGAIN;
 	unsigned long flags;
 	struct smmu_as *as;
 	struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@
 	/* Look for a free AS with lock held */
 	for  (i = 0; i < smmu->num_as; i++) {
 		as = &smmu->as[i];
-		if (!as->pdir_page) {
-			err = alloc_pdir(as);
-			if (!err)
-				goto found;
-		}
+
+		if (as->pdir_page)
+			continue;
+
+		err = alloc_pdir(as);
+		if (!err)
+			goto found;
+
 		if (err != -EAGAIN)
 			break;
 	}
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec6..baf2686 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
 #include <linux/sched.h>
 #include "isdnloop.h"
 
-static char *revision = "$Revision: 1.11.6.7 $";
 static char *isdnloop_id = "loop0";
 
 MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@
 static int __init
 isdnloop_init(void)
 {
-	char *p;
-	char rev[10];
-
-	if ((p = strchr(revision, ':'))) {
-		strcpy(rev, p + 1);
-		p = strchr(rev, '$');
-		*p = 0;
-	} else
-		strcpy(rev, " ??? ");
-	printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
 	if (isdnloop_id)
 		return (isdnloop_addcard(isdnloop_id));
 
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abc..949cabb 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@
 	InitWin(l2);
 	l2->l2m.fsm = &l2fsm;
 	if (test_bit(FLG_LAPB, &l2->flag) ||
-	    test_bit(FLG_PTP, &l2->flag) ||
+	    test_bit(FLG_FIXED_TEI, &l2->flag) ||
 	    test_bit(FLG_LAPD_NET, &l2->flag))
 		l2->l2m.state = ST_L2_4;
 	else
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6157cbb..363975b 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -224,7 +224,7 @@
 		struct led_classdev *led_cdev;
 
 		led_cdev = list_entry(entry, struct led_classdev, trig_list);
-		led_set_brightness(led_cdev, brightness);
+		__led_set_brightness(led_cdev, brightness);
 	}
 	read_unlock(&trig->leddev_list_lock);
 }
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 53bd136..0ade6eb 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -63,7 +63,7 @@
 	/* scale configuration */
 	addr = LP8788_ISINK_CTRL;
 	mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
-	val = cfg->scale << cfg->num;
+	val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
 	ret = lp8788_update_bits(led->lp, addr, mask, val);
 	if (ret)
 		return ret;
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 9ee12c2..771ea06 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -247,7 +247,7 @@
 
 	if (!cfg) {
 		dev_err(&pdev->dev, "missing platform data\n");
-		goto err0;
+		return -ENODEV;
 	}
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fcd0987..3f6203a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1108,8 +1108,11 @@
 			ret = 0;
 	}
 	rdev->sectors = rdev->sb_start;
-	/* Limit to 4TB as metadata cannot record more than that */
-	if (rdev->sectors >= (2ULL << 32))
+	/* Limit to 4TB as metadata cannot record more than that.
+	 * (not needed for Linear and RAID0 as metadata doesn't
+	 * record this size)
+	 */
+	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
 		rdev->sectors = (2ULL << 32) - 2;
 
 	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1400,7 +1403,7 @@
 	/* Limit to 4TB as metadata cannot record more than that.
 	 * 4TB == 2^32 KB, or 2*2^32 sectors.
 	 */
-	if (num_sectors >= (2ULL << 32))
+	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
 		num_sectors = (2ULL << 32) - 2;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
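
For reference on the constant used in both hunks above: the 0.90 superblock stores the device size as a 32-bit count of 1 KiB blocks, so the largest size it can record is 2^32 KiB = 2^42 bytes = 4 TiB, which in 512-byte sectors is 2 * 2^32 = (2ULL << 32); the clamp to (2ULL << 32) - 2 keeps the value just under that ceiling. The added level checks skip the clamp for linear and RAID0 arrays, whose metadata does not record the component size at all.
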
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index de5ed6f..1c2eb38 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -659,7 +659,11 @@
 		max = biovec->bv_len;
 
 	if (mddev->merge_check_needed) {
-		struct r10bio r10_bio;
+		struct {
+			struct r10bio r10_bio;
+			struct r10dev devs[conf->copies];
+		} on_stack;
+		struct r10bio *r10_bio = &on_stack.r10_bio;
 		int s;
 		if (conf->reshape_progress != MaxSector) {
 			/* Cannot give any guidance during reshape */
@@ -667,18 +671,18 @@
 				return biovec->bv_len;
 			return 0;
 		}
-		r10_bio.sector = sector;
-		raid10_find_phys(conf, &r10_bio);
+		r10_bio->sector = sector;
+		raid10_find_phys(conf, r10_bio);
 		rcu_read_lock();
 		for (s = 0; s < conf->copies; s++) {
-			int disk = r10_bio.devs[s].devnum;
+			int disk = r10_bio->devs[s].devnum;
 			struct md_rdev *rdev = rcu_dereference(
 				conf->mirrors[disk].rdev);
 			if (rdev && !test_bit(Faulty, &rdev->flags)) {
 				struct request_queue *q =
 					bdev_get_queue(rdev->bdev);
 				if (q->merge_bvec_fn) {
-					bvm->bi_sector = r10_bio.devs[s].addr
+					bvm->bi_sector = r10_bio->devs[s].addr
 						+ rdev->data_offset;
 					bvm->bi_bdev = rdev->bdev;
 					max = min(max, q->merge_bvec_fn(
@@ -690,7 +694,7 @@
 				struct request_queue *q =
 					bdev_get_queue(rdev->bdev);
 				if (q->merge_bvec_fn) {
-					bvm->bi_sector = r10_bio.devs[s].addr
+					bvm->bi_sector = r10_bio->devs[s].addr
 						+ rdev->data_offset;
 					bvm->bi_bdev = rdev->bdev;
 					max = min(max, q->merge_bvec_fn(
@@ -4414,14 +4418,18 @@
 {
 	/* Use sync reads to get the blocks from somewhere else */
 	int sectors = r10_bio->sectors;
-	struct r10bio r10b;
 	struct r10conf *conf = mddev->private;
+	struct {
+		struct r10bio r10_bio;
+		struct r10dev devs[conf->copies];
+	} on_stack;
+	struct r10bio *r10b = &on_stack.r10_bio;
 	int slot = 0;
 	int idx = 0;
 	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
 
-	r10b.sector = r10_bio->sector;
-	__raid10_find_phys(&conf->prev, &r10b);
+	r10b->sector = r10_bio->sector;
+	__raid10_find_phys(&conf->prev, r10b);
 
 	while (sectors) {
 		int s = sectors;
@@ -4432,7 +4440,7 @@
 			s = PAGE_SIZE >> 9;
 
 		while (!success) {
-			int d = r10b.devs[slot].devnum;
+			int d = r10b->devs[slot].devnum;
 			struct md_rdev *rdev = conf->mirrors[d].rdev;
 			sector_t addr;
 			if (rdev == NULL ||
@@ -4440,7 +4448,7 @@
 			    !test_bit(In_sync, &rdev->flags))
 				goto failed;
 
-			addr = r10b.devs[slot].addr + idx * PAGE_SIZE;
+			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
 			success = sync_page_io(rdev,
 					       addr,
 					       s << 9,
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 007c2c6..1054cf6 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -110,7 +110,7 @@
 	 * We choose the number when they are allocated.
 	 * We sometimes need an extra bio to write to the replacement.
 	 */
-	struct {
+	struct r10dev {
 		struct bio	*bio;
 		union {
 			struct bio	*repl_bio; /* used for resync and
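
Naming the per-copy struct r10dev in raid10.h is what lets raid10.c declare an on-stack r10bio with room for its trailing devs[] entries: the bare struct r10bio being removed had no storage for them, so the patch wraps the header in an anonymous struct whose second member provides conf->copies entries (a variable-length array inside a struct, a GCC extension). A standalone sketch of the same header-plus-variable-tail layout, shown here with the standard flexible-array-member idiom on the heap and made-up types:

#include <stdio.h>
#include <stdlib.h>

struct copy {
	int devnum;
	long addr;
};

/* Fixed header followed by a per-copy tail of caller-chosen length. */
struct bio_head {
	long sector;
	struct copy devs[];	/* flexible array member */
};

int main(void)
{
	int copies = 3;
	struct bio_head *b = malloc(sizeof(*b) + copies * sizeof(b->devs[0]));

	if (!b)
		return 1;

	b->sector = 12345;
	for (int i = 0; i < copies; i++) {
		b->devs[i].devnum = i;
		b->devs[i].addr = b->sector + i;
	}
	printf("last copy: dev %d at %ld\n",
	       b->devs[copies - 1].devnum, b->devs[copies - 1].addr);
	free(b);
	return 0;
}
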
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 664e460..aac6222 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -481,7 +481,7 @@
 	return 0;
 }
 
-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+static const struct usb_device_id smsusb_id_table[] = {
 	{ USB_DEVICE(0x187f, 0x0010),
 		.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
 	{ USB_DEVICE(0x187f, 0x0100),
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index d0b6bb5..72ded29 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -35,6 +35,11 @@
 #include <media/v4l2-device.h>
 #include <sound/tea575x-tuner.h>
 
+#if defined(CONFIG_LEDS_CLASS) || \
+    (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE))
+#define SHARK_USE_LEDS 1
+#endif
+
 /*
  * Version Information
  */
@@ -56,44 +61,18 @@
 
 enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS };
 
-static void shark_led_set_blue(struct led_classdev *led_cdev,
-			       enum led_brightness value);
-static void shark_led_set_blue_pulse(struct led_classdev *led_cdev,
-				     enum led_brightness value);
-static void shark_led_set_red(struct led_classdev *led_cdev,
-			      enum led_brightness value);
-
-static const struct led_classdev shark_led_templates[NO_LEDS] = {
-	[BLUE_LED] = {
-		.name		= "%s:blue:",
-		.brightness	= LED_OFF,
-		.max_brightness = 127,
-		.brightness_set = shark_led_set_blue,
-	},
-	[BLUE_PULSE_LED] = {
-		.name		= "%s:blue-pulse:",
-		.brightness	= LED_OFF,
-		.max_brightness = 255,
-		.brightness_set = shark_led_set_blue_pulse,
-	},
-	[RED_LED] = {
-		.name		= "%s:red:",
-		.brightness	= LED_OFF,
-		.max_brightness = 1,
-		.brightness_set = shark_led_set_red,
-	},
-};
-
 struct shark_device {
 	struct usb_device *usbdev;
 	struct v4l2_device v4l2_dev;
 	struct snd_tea575x tea;
 
+#ifdef SHARK_USE_LEDS
 	struct work_struct led_work;
 	struct led_classdev leds[NO_LEDS];
 	char led_names[NO_LEDS][32];
 	atomic_t brightness[NO_LEDS];
 	unsigned long brightness_new;
+#endif
 
 	u8 *transfer_buffer;
 	u32 last_val;
@@ -175,20 +154,13 @@
 	.read_val  = shark_read_val,
 };
 
+#ifdef SHARK_USE_LEDS
 static void shark_led_work(struct work_struct *work)
 {
 	struct shark_device *shark =
 		container_of(work, struct shark_device, led_work);
 	int i, res, brightness, actual_len;
 
-	/*
-	 * We use the v4l2_dev lock and registered bit to ensure the device
-	 * does not get unplugged and unreffed while we're running.
-	 */
-	mutex_lock(&shark->tea.mutex);
-	if (!video_is_registered(&shark->tea.vd))
-		goto leave;
-
 	for (i = 0; i < 3; i++) {
 		if (!test_and_clear_bit(i, &shark->brightness_new))
 			continue;
@@ -208,8 +180,6 @@
 			v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
 				 shark->led_names[i], res);
 	}
-leave:
-	mutex_unlock(&shark->tea.mutex);
 }
 
 static void shark_led_set_blue(struct led_classdev *led_cdev,
@@ -245,19 +215,78 @@
 	schedule_work(&shark->led_work);
 }
 
+static const struct led_classdev shark_led_templates[NO_LEDS] = {
+	[BLUE_LED] = {
+		.name		= "%s:blue:",
+		.brightness	= LED_OFF,
+		.max_brightness = 127,
+		.brightness_set = shark_led_set_blue,
+	},
+	[BLUE_PULSE_LED] = {
+		.name		= "%s:blue-pulse:",
+		.brightness	= LED_OFF,
+		.max_brightness = 255,
+		.brightness_set = shark_led_set_blue_pulse,
+	},
+	[RED_LED] = {
+		.name		= "%s:red:",
+		.brightness	= LED_OFF,
+		.max_brightness = 1,
+		.brightness_set = shark_led_set_red,
+	},
+};
+
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+	int i, retval;
+
+	INIT_WORK(&shark->led_work, shark_led_work);
+	for (i = 0; i < NO_LEDS; i++) {
+		shark->leds[i] = shark_led_templates[i];
+		snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
+			 shark->leds[i].name, shark->v4l2_dev.name);
+		shark->leds[i].name = shark->led_names[i];
+		retval = led_classdev_register(dev, &shark->leds[i]);
+		if (retval) {
+			v4l2_err(&shark->v4l2_dev,
+				 "couldn't register led: %s\n",
+				 shark->led_names[i]);
+			return retval;
+		}
+	}
+	return 0;
+}
+
+static void shark_unregister_leds(struct shark_device *shark)
+{
+	int i;
+
+	for (i = 0; i < NO_LEDS; i++)
+		led_classdev_unregister(&shark->leds[i]);
+
+	cancel_work_sync(&shark->led_work);
+}
+#else
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+	v4l2_warn(&shark->v4l2_dev,
+		  "CONFIG_LED_CLASS not enabled, LED support disabled\n");
+	return 0;
+}
+static inline void shark_unregister_leds(struct shark_device *shark) { }
+#endif
+
 static void usb_shark_disconnect(struct usb_interface *intf)
 {
 	struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
 	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
-	int i;
 
 	mutex_lock(&shark->tea.mutex);
 	v4l2_device_disconnect(&shark->v4l2_dev);
 	snd_tea575x_exit(&shark->tea);
 	mutex_unlock(&shark->tea.mutex);
 
-	for (i = 0; i < NO_LEDS; i++)
-		led_classdev_unregister(&shark->leds[i]);
+	shark_unregister_leds(shark);
 
 	v4l2_device_put(&shark->v4l2_dev);
 }
@@ -266,7 +295,6 @@
 {
 	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
 
-	cancel_work_sync(&shark->led_work);
 	v4l2_device_unregister(&shark->v4l2_dev);
 	kfree(shark->transfer_buffer);
 	kfree(shark);
@@ -276,7 +304,7 @@
 			   const struct usb_device_id *id)
 {
 	struct shark_device *shark;
-	int i, retval = -ENOMEM;
+	int retval = -ENOMEM;
 
 	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
 	if (!shark)
@@ -286,17 +314,13 @@
 	if (!shark->transfer_buffer)
 		goto err_alloc_buffer;
 
-	/*
-	 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling
-	 * pointer in intfdata causing v4l2-device.c to not set it. Which
-	 * results in usb_shark_disconnect() referencing the dangling pointer
-	 *
-	 * REMOVE (as soon as the above bug is fixed, patch submitted)
-	 */
-	usb_set_intfdata(intf, NULL);
+	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
+
+	retval = shark_register_leds(shark, &intf->dev);
+	if (retval)
+		goto err_reg_leds;
 
 	shark->v4l2_dev.release = usb_shark_release;
-	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
 	retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
 	if (retval) {
 		v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
@@ -320,32 +344,13 @@
 		goto err_init_tea;
 	}
 
-	INIT_WORK(&shark->led_work, shark_led_work);
-	for (i = 0; i < NO_LEDS; i++) {
-		shark->leds[i] = shark_led_templates[i];
-		snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
-			 shark->leds[i].name, shark->v4l2_dev.name);
-		shark->leds[i].name = shark->led_names[i];
-		/*
-		 * We don't fail the probe if we fail to register the leds,
-		 * because once we've called snd_tea575x_init, the /dev/radio0
-		 * node may be opened from userspace holding a reference to us!
-		 *
-		 * Note we cannot register the leds first instead as
-		 * shark_led_work depends on the v4l2 mutex and registered bit.
-		 */
-		retval = led_classdev_register(&intf->dev, &shark->leds[i]);
-		if (retval)
-			v4l2_err(&shark->v4l2_dev,
-				 "couldn't register led: %s\n",
-				 shark->led_names[i]);
-	}
-
 	return 0;
 
 err_init_tea:
 	v4l2_device_unregister(&shark->v4l2_dev);
 err_reg_dev:
+	shark_unregister_leds(shark);
+err_reg_leds:
 	kfree(shark->transfer_buffer);
 err_alloc_buffer:
 	kfree(shark);
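
The radio-shark rework above (and the matching radio-shark2.c changes that follow) compiles the LED code only when SHARK_USE_LEDS is defined, and otherwise provides stub register/unregister functions so the probe and disconnect paths need no #ifdefs of their own. A small standalone sketch of that stub pattern, with an invented FEATURE_LEDS switch:

#include <stdio.h>

/* Comment out to build without LED support, as with SHARK_USE_LEDS. */
#define FEATURE_LEDS 1

#ifdef FEATURE_LEDS
static int register_leds(void)
{
	puts("LEDs registered");
	return 0;
}

static void unregister_leds(void)
{
	puts("LEDs unregistered");
}
#else
/* No-op stubs keep the callers free of #ifdefs. */
static inline int register_leds(void)
{
	puts("LED support compiled out");
	return 0;
}

static inline void unregister_leds(void) { }
#endif

int main(void)
{
	if (register_leds())
		return 1;
	/* ... normal device operation ... */
	unregister_leds();
	return 0;
}
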
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index b9575de..7b4efdf 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -35,6 +35,11 @@
 #include <media/v4l2-device.h>
 #include "radio-tea5777.h"
 
+#if defined(CONFIG_LEDS_CLASS) || \
+    (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE))
+#define SHARK_USE_LEDS 1
+#endif
+
 MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver");
 MODULE_LICENSE("GPL");
@@ -43,7 +48,6 @@
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-1)");
 
-
 #define SHARK_IN_EP		0x83
 #define SHARK_OUT_EP		0x05
 
@@ -54,36 +58,18 @@
 
 enum { BLUE_LED, RED_LED, NO_LEDS };
 
-static void shark_led_set_blue(struct led_classdev *led_cdev,
-			       enum led_brightness value);
-static void shark_led_set_red(struct led_classdev *led_cdev,
-			      enum led_brightness value);
-
-static const struct led_classdev shark_led_templates[NO_LEDS] = {
-	[BLUE_LED] = {
-		.name		= "%s:blue:",
-		.brightness	= LED_OFF,
-		.max_brightness = 127,
-		.brightness_set = shark_led_set_blue,
-	},
-	[RED_LED] = {
-		.name		= "%s:red:",
-		.brightness	= LED_OFF,
-		.max_brightness = 1,
-		.brightness_set = shark_led_set_red,
-	},
-};
-
 struct shark_device {
 	struct usb_device *usbdev;
 	struct v4l2_device v4l2_dev;
 	struct radio_tea5777 tea;
 
+#ifdef SHARK_USE_LEDS
 	struct work_struct led_work;
 	struct led_classdev leds[NO_LEDS];
 	char led_names[NO_LEDS][32];
 	atomic_t brightness[NO_LEDS];
 	unsigned long brightness_new;
+#endif
 
 	u8 *transfer_buffer;
 };
@@ -161,18 +147,12 @@
 	.read_reg  = shark_read_reg,
 };
 
+#ifdef SHARK_USE_LEDS
 static void shark_led_work(struct work_struct *work)
 {
 	struct shark_device *shark =
 		container_of(work, struct shark_device, led_work);
 	int i, res, brightness, actual_len;
-	/*
-	 * We use the v4l2_dev lock and registered bit to ensure the device
-	 * does not get unplugged and unreffed while we're running.
-	 */
-	mutex_lock(&shark->tea.mutex);
-	if (!video_is_registered(&shark->tea.vd))
-		goto leave;
 
 	for (i = 0; i < 2; i++) {
 		if (!test_and_clear_bit(i, &shark->brightness_new))
@@ -191,8 +171,6 @@
 			v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
 				 shark->led_names[i], res);
 	}
-leave:
-	mutex_unlock(&shark->tea.mutex);
 }
 
 static void shark_led_set_blue(struct led_classdev *led_cdev,
@@ -217,19 +195,72 @@
 	schedule_work(&shark->led_work);
 }
 
+static const struct led_classdev shark_led_templates[NO_LEDS] = {
+	[BLUE_LED] = {
+		.name		= "%s:blue:",
+		.brightness	= LED_OFF,
+		.max_brightness = 127,
+		.brightness_set = shark_led_set_blue,
+	},
+	[RED_LED] = {
+		.name		= "%s:red:",
+		.brightness	= LED_OFF,
+		.max_brightness = 1,
+		.brightness_set = shark_led_set_red,
+	},
+};
+
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+	int i, retval;
+
+	INIT_WORK(&shark->led_work, shark_led_work);
+	for (i = 0; i < NO_LEDS; i++) {
+		shark->leds[i] = shark_led_templates[i];
+		snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
+			 shark->leds[i].name, shark->v4l2_dev.name);
+		shark->leds[i].name = shark->led_names[i];
+		retval = led_classdev_register(dev, &shark->leds[i]);
+		if (retval) {
+			v4l2_err(&shark->v4l2_dev,
+				 "couldn't register led: %s\n",
+				 shark->led_names[i]);
+			return retval;
+		}
+	}
+	return 0;
+}
+
+static void shark_unregister_leds(struct shark_device *shark)
+{
+	int i;
+
+	for (i = 0; i < NO_LEDS; i++)
+		led_classdev_unregister(&shark->leds[i]);
+
+	cancel_work_sync(&shark->led_work);
+}
+#else
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+	v4l2_warn(&shark->v4l2_dev,
+		  "CONFIG_LED_CLASS not enabled, LED support disabled\n");
+	return 0;
+}
+static inline void shark_unregister_leds(struct shark_device *shark) { }
+#endif
+
 static void usb_shark_disconnect(struct usb_interface *intf)
 {
 	struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
 	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
-	int i;
 
 	mutex_lock(&shark->tea.mutex);
 	v4l2_device_disconnect(&shark->v4l2_dev);
 	radio_tea5777_exit(&shark->tea);
 	mutex_unlock(&shark->tea.mutex);
 
-	for (i = 0; i < NO_LEDS; i++)
-		led_classdev_unregister(&shark->leds[i]);
+	shark_unregister_leds(shark);
 
 	v4l2_device_put(&shark->v4l2_dev);
 }
@@ -238,7 +269,6 @@
 {
 	struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
 
-	cancel_work_sync(&shark->led_work);
 	v4l2_device_unregister(&shark->v4l2_dev);
 	kfree(shark->transfer_buffer);
 	kfree(shark);
@@ -248,7 +278,7 @@
 			   const struct usb_device_id *id)
 {
 	struct shark_device *shark;
-	int i, retval = -ENOMEM;
+	int retval = -ENOMEM;
 
 	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
 	if (!shark)
@@ -258,17 +288,13 @@
 	if (!shark->transfer_buffer)
 		goto err_alloc_buffer;
 
-	/*
-	 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling
-	 * pointer in intfdata causing v4l2-device.c to not set it. Which
-	 * results in usb_shark_disconnect() referencing the dangling pointer
-	 *
-	 * REMOVE (as soon as the above bug is fixed, patch submitted)
-	 */
-	usb_set_intfdata(intf, NULL);
+	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
+
+	retval = shark_register_leds(shark, &intf->dev);
+	if (retval)
+		goto err_reg_leds;
 
 	shark->v4l2_dev.release = usb_shark_release;
-	v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
 	retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
 	if (retval) {
 		v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
@@ -292,32 +318,13 @@
 		goto err_init_tea;
 	}
 
-	INIT_WORK(&shark->led_work, shark_led_work);
-	for (i = 0; i < NO_LEDS; i++) {
-		shark->leds[i] = shark_led_templates[i];
-		snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
-			 shark->leds[i].name, shark->v4l2_dev.name);
-		shark->leds[i].name = shark->led_names[i];
-		/*
-		 * We don't fail the probe if we fail to register the leds,
-		 * because once we've called radio_tea5777_init, the /dev/radio0
-		 * node may be opened from userspace holding a reference to us!
-		 *
-		 * Note we cannot register the leds first instead as
-		 * shark_led_work depends on the v4l2 mutex and registered bit.
-		 */
-		retval = led_classdev_register(&intf->dev, &shark->leds[i]);
-		if (retval)
-			v4l2_err(&shark->v4l2_dev,
-				 "couldn't register led: %s\n",
-				 shark->led_names[i]);
-	}
-
 	return 0;
 
 err_init_tea:
 	v4l2_device_unregister(&shark->v4l2_dev);
 err_reg_dev:
+	shark_unregister_leds(shark);
+err_reg_leds:
 	kfree(shark->transfer_buffer);
 err_alloc_buffer:
 	kfree(shark);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 9e38132..9bb65e1 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -151,6 +151,7 @@
 		.index = 0,
 		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
 			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+			    V4L2_TUNER_CAP_FREQ_BANDS |
 			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
 			    V4L2_TUNER_CAP_HWSEEK_WRAP,
 		.rangelow   =  87500 * 16,
@@ -162,6 +163,7 @@
 		.index = 1,
 		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
 			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+			    V4L2_TUNER_CAP_FREQ_BANDS |
 			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
 			    V4L2_TUNER_CAP_HWSEEK_WRAP,
 		.rangelow   =  76000 * 16,
@@ -173,6 +175,7 @@
 		.index = 2,
 		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
 			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+			    V4L2_TUNER_CAP_FREQ_BANDS |
 			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
 			    V4L2_TUNER_CAP_HWSEEK_WRAP,
 		.rangelow   =  76000 * 16,
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 643a6ff..f867f04 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -225,8 +225,9 @@
 {
 	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
 	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
-	capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
-		V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
+		V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
+	capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	return 0;
 }
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 146be42..be076f7 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -531,7 +531,7 @@
 	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
 	usb_make_path(radio->usbdev, capability->bus_info,
 			sizeof(capability->bus_info));
-	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
+	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
 		V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
 	capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5180390..8be5763 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -261,6 +261,7 @@
 
 config IR_IGUANA
 	tristate "IguanaWorks USB IR Transceiver"
+	depends on USB_ARCH_HAS_HCD
 	depends on RC_CORE
 	select USB
 	---help---
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
index cf9d9fc..2347771 100644
--- a/drivers/media/video/gspca/jl2005bcd.c
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -512,7 +512,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0979, 0x0227)},
 	{}
 };
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 969bb5a..bab01c86 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -579,7 +579,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06e1, 0xa190)},
 /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
 	{USB_DEVICE(0x0733, 0x0430)}, */
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 7efe9ad..0b91a5c 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -431,7 +431,7 @@
 	strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
 	strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
 	strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
-	cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index d2e6f82..560a65a 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -403,7 +403,7 @@
 
 	dev_dbg(pcdev->icd->parent, "Activate device\n");
 
-	clk_enable(pcdev->clk);
+	clk_prepare_enable(pcdev->clk);
 
 	/* enable CSI before doing anything else */
 	__raw_writel(csicr1, pcdev->base + CSICR1);
@@ -422,7 +422,7 @@
 	/* Disable all CSI interface */
 	__raw_writel(0x00, pcdev->base + CSICR1);
 
-	clk_disable(pcdev->clk);
+	clk_disable_unprepare(pcdev->clk);
 }
 
 /*
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 637bde8..ac17540 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -272,7 +272,7 @@
 	struct device		*dev;
 	struct soc_camera_host	soc_host;
 	struct soc_camera_device *icd;
-	struct clk		*clk_csi, *clk_emma;
+	struct clk		*clk_csi, *clk_emma_ahb, *clk_emma_ipg;
 
 	unsigned int		irq_csi, irq_emma;
 	void __iomem		*base_csi, *base_emma;
@@ -407,7 +407,7 @@
 {
 	unsigned long flags;
 
-	clk_disable(pcdev->clk_csi);
+	clk_disable_unprepare(pcdev->clk_csi);
 	writel(0, pcdev->base_csi + CSICR1);
 	if (cpu_is_mx27()) {
 		writel(0, pcdev->base_emma + PRP_CNTL);
@@ -435,7 +435,7 @@
 	if (pcdev->icd)
 		return -EBUSY;
 
-	ret = clk_enable(pcdev->clk_csi);
+	ret = clk_prepare_enable(pcdev->clk_csi);
 	if (ret < 0)
 		return ret;
 
@@ -1633,23 +1633,34 @@
 		goto exit_iounmap;
 	}
 
-	pcdev->clk_emma = clk_get(NULL, "emma");
-	if (IS_ERR(pcdev->clk_emma)) {
-		err = PTR_ERR(pcdev->clk_emma);
+	pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg");
+	if (IS_ERR(pcdev->clk_emma_ipg)) {
+		err = PTR_ERR(pcdev->clk_emma_ipg);
 		goto exit_free_irq;
 	}
 
-	clk_enable(pcdev->clk_emma);
+	clk_prepare_enable(pcdev->clk_emma_ipg);
+
+	pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb");
+	if (IS_ERR(pcdev->clk_emma_ahb)) {
+		err = PTR_ERR(pcdev->clk_emma_ahb);
+		goto exit_clk_emma_ipg_put;
+	}
+
+	clk_prepare_enable(pcdev->clk_emma_ahb);
 
 	err = mx27_camera_emma_prp_reset(pcdev);
 	if (err)
-		goto exit_clk_emma_put;
+		goto exit_clk_emma_ahb_put;
 
 	return err;
 
-exit_clk_emma_put:
-	clk_disable(pcdev->clk_emma);
-	clk_put(pcdev->clk_emma);
+exit_clk_emma_ahb_put:
+	clk_disable_unprepare(pcdev->clk_emma_ahb);
+	clk_put(pcdev->clk_emma_ahb);
+exit_clk_emma_ipg_put:
+	clk_disable_unprepare(pcdev->clk_emma_ipg);
+	clk_put(pcdev->clk_emma_ipg);
 exit_free_irq:
 	free_irq(pcdev->irq_emma, pcdev);
 exit_iounmap:
@@ -1685,7 +1696,7 @@
 		goto exit;
 	}
 
-	pcdev->clk_csi = clk_get(&pdev->dev, NULL);
+	pcdev->clk_csi = clk_get(&pdev->dev, "ahb");
 	if (IS_ERR(pcdev->clk_csi)) {
 		dev_err(&pdev->dev, "Could not get csi clock\n");
 		err = PTR_ERR(pcdev->clk_csi);
@@ -1785,8 +1796,10 @@
 eallocctx:
 	if (cpu_is_mx27()) {
 		free_irq(pcdev->irq_emma, pcdev);
-		clk_disable(pcdev->clk_emma);
-		clk_put(pcdev->clk_emma);
+		clk_disable_unprepare(pcdev->clk_emma_ipg);
+		clk_put(pcdev->clk_emma_ipg);
+		clk_disable_unprepare(pcdev->clk_emma_ahb);
+		clk_put(pcdev->clk_emma_ahb);
 		iounmap(pcdev->base_emma);
 		release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma));
 	}
@@ -1825,8 +1838,10 @@
 	iounmap(pcdev->base_csi);
 
 	if (cpu_is_mx27()) {
-		clk_disable(pcdev->clk_emma);
-		clk_put(pcdev->clk_emma);
+		clk_disable_unprepare(pcdev->clk_emma_ipg);
+		clk_put(pcdev->clk_emma_ipg);
+		clk_disable_unprepare(pcdev->clk_emma_ahb);
+		clk_put(pcdev->clk_emma_ahb);
 		iounmap(pcdev->base_emma);
 		res = pcdev->res_emma;
 		release_mem_region(res->start, resource_size(res));
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index f13643d..af2297d 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -61,15 +61,9 @@
 
 #define MAX_VIDEO_MEM 16
 
-enum csi_buffer_state {
-	CSI_BUF_NEEDS_INIT,
-	CSI_BUF_PREPARED,
-};
-
 struct mx3_camera_buffer {
 	/* common v4l buffer stuff -- must be first */
 	struct vb2_buffer			vb;
-	enum csi_buffer_state			state;
 	struct list_head			queue;
 
 	/* One descriptot per scatterlist (per frame) */
@@ -285,7 +279,7 @@
 		goto error;
 	}
 
-	if (buf->state == CSI_BUF_NEEDS_INIT) {
+	if (!buf->txd) {
 		sg_dma_address(sg)	= vb2_dma_contig_plane_dma_addr(vb, 0);
 		sg_dma_len(sg)		= new_size;
 
@@ -298,7 +292,6 @@
 		txd->callback_param	= txd;
 		txd->callback		= mx3_cam_dma_done;
 
-		buf->state		= CSI_BUF_PREPARED;
 		buf->txd		= txd;
 	} else {
 		txd = buf->txd;
@@ -385,7 +378,6 @@
 
 	/* Doesn't hurt also if the list is empty */
 	list_del_init(&buf->queue);
-	buf->state = CSI_BUF_NEEDS_INIT;
 
 	if (txd) {
 		buf->txd = NULL;
@@ -405,13 +397,13 @@
 	struct mx3_camera_dev *mx3_cam = ici->priv;
 	struct mx3_camera_buffer *buf = to_mx3_vb(vb);
 
-	/* This is for locking debugging only */
-	INIT_LIST_HEAD(&buf->queue);
-	sg_init_table(&buf->sg, 1);
+	if (!buf->txd) {
+		/* This is for locking debugging only */
+		INIT_LIST_HEAD(&buf->queue);
+		sg_init_table(&buf->sg, 1);
 
-	buf->state = CSI_BUF_NEEDS_INIT;
-
-	mx3_cam->buf_total += vb2_plane_size(vb, 0);
+		mx3_cam->buf_total += vb2_plane_size(vb, 0);
+	}
 
 	return 0;
 }
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b03ffec..1bde255 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -171,7 +171,8 @@
 	dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
 		pixfmtstr(pix->pixelformat), pix->width, pix->height);
 
-	if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+	if (pix->pixelformat != V4L2_PIX_FMT_JPEG &&
+	    !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
 		pix->bytesperline = 0;
 		pix->sizeimage = 0;
 	}
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index 89dce09..a397812 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -378,6 +378,9 @@
 
 s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
 {
+	if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+		return 0;
+
 	if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
 		return width * mf->bits_per_sample / 8;
 
@@ -400,6 +403,9 @@
 s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
 			u32 bytes_per_line, u32 height)
 {
+	if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+		return 0;
+
 	if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
 		return bytes_per_line * height;
 
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 9288fbd..5577381 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -338,6 +338,7 @@
 	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
 		buf->error = 0;
 		buf->state = UVC_BUF_STATE_QUEUED;
+		buf->bytesused = 0;
 		vb2_set_plane_payload(&buf->buf, 0, 0);
 		return buf;
 	}
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index c3b7b5f..6bc47fc 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -402,8 +402,10 @@
 {
 	const struct v4l2_hw_freq_seek *p = arg;
 
-	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
-		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
+	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
+		"rangelow=%u, rangehigh=%u\n",
+		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
+		p->rangelow, p->rangehigh);
 }
 
 static void v4l_print_requestbuffers(const void *arg, bool write_only)
@@ -1853,6 +1855,8 @@
 			.type = type,
 		};
 
+		if (p->index)
+			return -EINVAL;
 		err = ops->vidioc_g_tuner(file, fh, &t);
 		if (err)
 			return err;
@@ -1870,6 +1874,8 @@
 
 		if (type != V4L2_TUNER_RADIO)
 			return -EINVAL;
+		if (p->index)
+			return -EINVAL;
 		err = ops->vidioc_g_modulator(file, fh, &m);
 		if (err)
 			return err;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d1facef..b1a1462 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -395,7 +395,8 @@
 
 config MFD_TC6393XB
 	bool "Support Toshiba TC6393XB"
-	depends on GPIOLIB && ARM && HAVE_CLK
+	depends on ARM && HAVE_CLK
+	select GPIOLIB
 	select MFD_CORE
 	select MFD_TMIO
 	help
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 383421b..683e18a 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -925,6 +925,7 @@
 			goto out;
 	}
 
+	ret = 0;
 	if (pdata->leds) {
 		int i;
 
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 43a76c4..db662e2 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -202,7 +202,7 @@
 		}
 		local_irq_enable();
 		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
-	} while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
+	} while (gpio_get_value(pdata->gpio));
 }
 
 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c6ffbbe..d78c05e 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -1253,7 +1253,7 @@
 			if (dev->wd_timeout)
 				*slots -= mei_data2slots(MEI_START_WD_DATA_SIZE);
 			else
-				*slots -= mei_data2slots(MEI_START_WD_DATA_SIZE);
+				*slots -= mei_data2slots(MEI_WD_PARAMS_SIZE);
 		}
 	}
 	if (dev->stop)
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 0923302..7422c76 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -925,6 +925,27 @@
 };
 
 /**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	u32 reg;
+	if (ent->device == MEI_DEV_ID_PBG_1) {
+		pci_read_config_dword(pdev, 0x48, &reg);
+		/* make sure that bit 9 is up and bit 10 is down */
+		if ((reg & 0x600) == 0x200) {
+			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+			return false;
+		}
+	}
+	return true;
+}
+/**
  * mei_probe - Device Initialization Routine
  *
  * @pdev: PCI device structure
@@ -939,6 +960,12 @@
 	int err;
 
 	mutex_lock(&mei_mutex);
+
+	if (!mei_quirk_probe(pdev, ent)) {
+		err = -ENODEV;
+		goto end;
+	}
+
 	if (mei_device) {
 		err = -EEXIST;
 		goto end;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 87b251a..b9e2000 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,6 +18,8 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@
 					 XPC_NOTIFY_MSG_SIZE_UV)
 #define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
 
+static int xpc_mq_node = -1;
+
 static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
@@ -109,11 +113,8 @@
 #if defined CONFIG_X86_64
 	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
 			UV_AFFINITY_CPU);
-	if (mq->irq < 0) {
-		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
-			-mq->irq);
+	if (mq->irq < 0)
 		return mq->irq;
-	}
 
 	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
 
@@ -238,8 +239,9 @@
 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
 	nid = cpu_to_node(cpu);
-	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-				pg_order);
+	page = alloc_pages_exact_node(nid,
+				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				      pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
 			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@
 	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
 };
 
+static int
+xpc_init_mq_node(int nid)
+{
+	int cpu;
+
+	get_online_cpus();
+
+	for_each_cpu(cpu, cpumask_of_node(nid)) {
+		xpc_activate_mq_uv =
+			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+					     XPC_ACTIVATE_IRQ_NAME,
+					     xpc_handle_activate_IRQ_uv);
+		if (!IS_ERR(xpc_activate_mq_uv))
+			break;
+	}
+	if (IS_ERR(xpc_activate_mq_uv)) {
+		put_online_cpus();
+		return PTR_ERR(xpc_activate_mq_uv);
+	}
+
+	for_each_cpu(cpu, cpumask_of_node(nid)) {
+		xpc_notify_mq_uv =
+			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+					     XPC_NOTIFY_IRQ_NAME,
+					     xpc_handle_notify_IRQ_uv);
+		if (!IS_ERR(xpc_notify_mq_uv))
+			break;
+	}
+	if (IS_ERR(xpc_notify_mq_uv)) {
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+		put_online_cpus();
+		return PTR_ERR(xpc_notify_mq_uv);
+	}
+
+	put_online_cpus();
+	return 0;
+}
+
 int
 xpc_init_uv(void)
 {
+	int nid;
+	int ret = 0;
+
 	xpc_arch_ops = xpc_arch_ops_uv;
 
 	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@
 		return -E2BIG;
 	}
 
-	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
-						  XPC_ACTIVATE_IRQ_NAME,
-						  xpc_handle_activate_IRQ_uv);
-	if (IS_ERR(xpc_activate_mq_uv))
-		return PTR_ERR(xpc_activate_mq_uv);
+	if (xpc_mq_node < 0)
+		for_each_online_node(nid) {
+			ret = xpc_init_mq_node(nid);
 
-	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
-						XPC_NOTIFY_IRQ_NAME,
-						xpc_handle_notify_IRQ_uv);
-	if (IS_ERR(xpc_notify_mq_uv)) {
-		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
-		return PTR_ERR(xpc_notify_mq_uv);
-	}
+			if (!ret)
+				break;
+		}
+	else
+		ret = xpc_init_mq_node(xpc_mq_node);
 
-	return 0;
+	if (ret < 0)
+		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+			-ret);
+
+	return ret;
 }
 
 void
@@ -1765,3 +1808,6 @@
 	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
 	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 1ff460a..93b4d67 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -87,7 +87,7 @@
 	/* communicate to platform about chip wakeup */
 	kim_data = st_data->kim_data;
 	pdata = kim_data->kim_pdev->dev.platform_data;
-	if (pdata->chip_asleep)
+	if (pdata->chip_awake)
 		pdata->chip_awake(NULL);
 }
 
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454..c3bb304 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/partitions.h>
 #include <asm/io.h>
+#include <asm/sections.h>
 
 /****************************************************************************/
 
-extern char _ebss;
-
 struct map_info uclinux_ram_map = {
 	.name = "RAM",
-	.phys = (unsigned long)&_ebss,
+	.phys = (unsigned long)__bss_stop,
 	.size = 0,
 };
 
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 31bb7e5..8ca4176 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -480,7 +480,7 @@
 
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
-        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
+        depends on MTD_NAND && MXS_DMA
         help
 	 Enables NAND Flash support for IMX23 or IMX28.
 	 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index e9309b3..ac4fd75 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1245,7 +1245,6 @@
 			goto out_release_mem_region;
 		} else {
 			struct dma_slave_config cfg;
-			int rc;
 
 			memset(&cfg, 0, sizeof(cfg));
 			cfg.src_addr = info->phys_base;
@@ -1254,10 +1253,10 @@
 			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 			cfg.src_maxburst = 16;
 			cfg.dst_maxburst = 16;
-			rc = dmaengine_slave_config(info->dma, &cfg);
-			if (rc) {
+			err = dmaengine_slave_config(info->dma, &cfg);
+			if (err) {
 				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
-					rc);
+					err);
 				goto out_release_mem_region;
 			}
 			info->nand.read_buf   = omap_read_buf_dma_pref;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09e..cff6f02 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@
 		printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
 			cardname);
 	cops_dev = cops_probe(-1);
-	if (IS_ERR(cops_dev))
-		return PTR_ERR(cops_dev);
-        return 0;
+	return PTR_RET(cops_dev);
 }
 
 static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce..b5782cd 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@
 		       "ltpc: Autoprobing is not recommended for modules\n");
 
 	dev_ltpc = ltpc_probe();
-	if (IS_ERR(dev_ltpc))
-		return PTR_ERR(dev_ltpc);
-	return 0;
+	return PTR_RET(dev_ltpc);
 }
 module_init(ltpc_module_init);
 #endif
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6fae5f3..b24ce25 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -398,7 +398,7 @@
 		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
 	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
 
-	if (unlikely(netpoll_tx_running(slave_dev)))
+	if (unlikely(netpoll_tx_running(bond->dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 	else
 		dev_queue_xmit(skb);
@@ -1120,10 +1120,10 @@
 			write_unlock_bh(&bond->curr_slave_lock);
 			read_unlock(&bond->lock);
 
-			netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
 			if (should_notify_peers)
-				netdev_bonding_change(bond->dev,
-						      NETDEV_NOTIFY_PEERS);
+				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+							 bond->dev);
 
 			read_lock(&bond->lock);
 			write_lock_bh(&bond->curr_slave_lock);
@@ -1235,12 +1235,12 @@
 	struct netpoll *np;
 	int err = 0;
 
-	np = kzalloc(sizeof(*np), GFP_KERNEL);
+	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 	err = -ENOMEM;
 	if (!np)
 		goto out;
 
-	err = __netpoll_setup(np, slave->dev);
+	err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
 	if (err) {
 		kfree(np);
 		goto out;
@@ -1257,9 +1257,7 @@
 		return;
 
 	slave->np = NULL;
-	synchronize_rcu_bh();
-	__netpoll_cleanup(np);
-	kfree(np);
+	__netpoll_free_rcu(np);
 }
 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
 {
@@ -1292,7 +1290,7 @@
 	read_unlock(&bond->lock);
 }
 
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
 	struct bonding *bond = netdev_priv(dev);
 	struct slave *slave;
@@ -1560,8 +1558,8 @@
 				 bond_dev->name,
 				 bond_dev->type, slave_dev->type);
 
-			res = netdev_bonding_change(bond_dev,
-						    NETDEV_PRE_TYPE_CHANGE);
+			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+						       bond_dev);
 			res = notifier_to_errno(res);
 			if (res) {
 				pr_err("%s: refused to change device type\n",
@@ -1581,8 +1579,8 @@
 				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 			}
 
-			netdev_bonding_change(bond_dev,
-					      NETDEV_POST_TYPE_CHANGE);
+			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+						 bond_dev);
 		}
 	} else if (bond_dev->type != slave_dev->type) {
 		pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1943,7 +1941,7 @@
 	}
 
 	block_netpoll_tx();
-	netdev_bonding_change(bond_dev, NETDEV_RELEASE);
+	call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
 	write_lock_bh(&bond->lock);
 
 	slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2586,7 +2584,7 @@
 			read_unlock(&bond->lock);
 			return;
 		}
-		netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
 		rtnl_unlock();
 	}
 }
@@ -3205,7 +3203,7 @@
 			read_unlock(&bond->lock);
 			return;
 		}
-		netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
 		rtnl_unlock();
 	}
 }
@@ -3354,56 +3352,93 @@
 /*---------------------------- Hashing Policies -----------------------------*/
 
 /*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP mimic bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
-{
-	struct ethhdr *data = (struct ethhdr *)skb->data;
-	struct iphdr *iph = ip_hdr(skb);
-
-	if (skb->protocol == htons(ETH_P_IP)) {
-		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-			(data->h_dest[5] ^ data->h_source[5])) % count;
-	}
-
-	return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, mimic bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
-{
-	struct ethhdr *data = (struct ethhdr *)skb->data;
-	struct iphdr *iph = ip_hdr(skb);
-	__be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-	int layer4_xor = 0;
-
-	if (skb->protocol == htons(ETH_P_IP)) {
-		if (!ip_is_fragment(iph) &&
-		    (iph->protocol == IPPROTO_TCP ||
-		     iph->protocol == IPPROTO_UDP)) {
-			layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
-		}
-		return (layer4_xor ^
-			((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-
-	}
-
-	return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
  * Hash for the output device based upon layer 2 data
  */
 static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
-	return (data->h_dest[5] ^ data->h_source[5]) % count;
+	if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
+		return (data->h_dest[5] ^ data->h_source[5]) % count;
+
+	return 0;
+}
+
+/*
+ * Hash for the output device based upon layer 2 and layer 3 data. If
+ * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
+ */
+static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+{
+	struct ethhdr *data = (struct ethhdr *)skb->data;
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	u32 v6hash;
+	__be32 *s, *d;
+
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    skb_network_header_len(skb) >= sizeof(*iph)) {
+		iph = ip_hdr(skb);
+		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
+			(data->h_dest[5] ^ data->h_source[5])) % count;
+	} else if (skb->protocol == htons(ETH_P_IPV6) &&
+		   skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+		ipv6h = ipv6_hdr(skb);
+		s = &ipv6h->saddr.s6_addr32[0];
+		d = &ipv6h->daddr.s6_addr32[0];
+		v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+		v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
+		return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
+	}
+
+	return bond_xmit_hash_policy_l2(skb, count);
+}
+
+/*
+ * Hash for the output device based upon layer 3 and layer 4 data. If
+ * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
+ * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+ */
+static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+{
+	u32 layer4_xor = 0;
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	__be32 *s, *d;
+	__be16 *layer4hdr;
+
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    skb_network_header_len(skb) >= sizeof(*iph)) {
+		iph = ip_hdr(skb);
+		if (!ip_is_fragment(iph) &&
+		    (iph->protocol == IPPROTO_TCP ||
+		     iph->protocol == IPPROTO_UDP) &&
+		    (skb_headlen(skb) - skb_network_offset(skb) >=
+		     iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+			layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+			layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+		}
+		return (layer4_xor ^
+			((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
+	} else if (skb->protocol == htons(ETH_P_IPV6) &&
+		   skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+		ipv6h = ipv6_hdr(skb);
+		if ((ipv6h->nexthdr == IPPROTO_TCP ||
+		     ipv6h->nexthdr == IPPROTO_UDP) &&
+		    (skb_headlen(skb) - skb_network_offset(skb) >=
+		     sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+			layer4hdr = (__be16 *)(ipv6h + 1);
+			layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+		}
+		s = &ipv6h->saddr.s6_addr32[0];
+		d = &ipv6h->daddr.s6_addr32[0];
+		layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+		layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
+			       (layer4_xor >> 8);
+		return layer4_xor % count;
+	}
+
+	return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd5..021d69c 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@
 static void
 e100_netpoll(struct net_device* netdev)
 {
-	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
 }
 #endif
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 77bcd4c..463b9ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1278,7 +1278,7 @@
 #define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)
 
 #define BNX2X_FW_RX_ALIGN_END					\
-	max(1UL << BNX2X_RX_ALIGN_SHIFT, 			\
+	max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT,			\
 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dd451c3..02b5a34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4041,20 +4041,6 @@
 	return val != 0;
 }
 
-/*
- * Reset the load status for the current engine.
- */
-static void bnx2x_clear_load_status(struct bnx2x *bp)
-{
-	u32 val;
-	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
-		    BNX2X_PATH0_LOAD_CNT_MASK);
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-}
-
 static void _print_next_block(int idx, const char *blk)
 {
 	pr_cont("%s%s", idx ? ", " : "", blk);
@@ -9384,32 +9370,24 @@
 	return rc;
 }
 
-static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
-{
-	int pos;
-	u32 cap;
-	struct pci_dev *dev = bp->pdev;
-
-	pos = pci_pcie_cap(dev);
-	if (!pos)
-		return false;
-
-	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
-	if (!(cap & PCI_EXP_DEVCAP_FLR))
-		return false;
-
-	return true;
-}
-
 static int __devinit bnx2x_do_flr(struct bnx2x *bp)
 {
 	int i, pos;
 	u16 status;
 	struct pci_dev *dev = bp->pdev;
 
-	/* probe the capability first */
-	if (bnx2x_can_flr(bp))
-		return -ENOTTY;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+		return -EINVAL;
+	}
+
+	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+			  bp->common.bc_ver);
+		return -EINVAL;
+	}
 
 	pos = pci_pcie_cap(dev);
 	if (!pos)
@@ -9429,12 +9407,8 @@
 		"transaction is not cleared; proceeding with reset anyway\n");
 
 clear:
-	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
-		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
-			  bp->common.bc_ver);
-		return -EINVAL;
-	}
 
+	BNX2X_DEV_INFO("Initiating FLR\n");
 	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
 
 	return 0;
@@ -9454,8 +9428,21 @@
 	 * the one required, then FLR will be sufficient to clean any residue
 	 * left by previous driver
 	 */
-	if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
-		return bnx2x_do_flr(bp);
+	rc = bnx2x_test_firmware_version(bp, false);
+
+	if (!rc) {
+		/* fw version is good */
+		BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+		rc = bnx2x_do_flr(bp);
+	}
+
+	if (!rc) {
+		/* FLR was performed */
+		BNX2X_DEV_INFO("FLR successful\n");
+		return 0;
+	}
+
+	BNX2X_DEV_INFO("Could not FLR\n");
 
 	/* Close the MCP request, return failure*/
 	rc = bnx2x_prev_mcp_done(bp);
@@ -11427,9 +11414,6 @@
 	if (!chip_is_e1x)
 		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
-	/* Reset the load counter */
-	bnx2x_clear_load_status(bp);
-
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	dev->netdev_ops = &bnx2x_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87..62f754b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@
 		break;
 
 	default:
+		kfree(new_cmd);
 		BNX2X_ERR("Unknown command: %d\n", cmd);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c60de89..90a903d8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@
 
 	if (adapter->num_rx_qs != MAX_RX_QS)
 		dev_info(&adapter->pdev->dev,
-			"Created only %d receive queues", adapter->num_rx_qs);
+			"Created only %d receive queues\n", adapter->num_rx_qs);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3574e14..feff516 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -62,6 +62,13 @@
 	---help---
 	  This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
+config FSL_XGMAC_MDIO
+	tristate "Freescale XGMAC MDIO"
+	depends on FSL_SOC
+	select PHYLIB
+	---help---
+	  This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
 	tristate "Freescale QE Gigabit Ethernet"
 	depends on QUICC_ENGINE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 1752488..3d1839a 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -9,6 +9,7 @@
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 0f2d1a7..1514533 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -174,8 +174,10 @@
 
 	new_bus->phy_mask = ~0;
 	new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-	if (!new_bus->irq)
+	if (!new_bus->irq) {
+		ret = -ENOMEM;
 		goto out_unmap_regs;
+	}
 
 	new_bus->parent = &ofdev->dev;
 	dev_set_drvdata(&ofdev->dev, new_bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 55bb8672..cdf702a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -137,8 +137,10 @@
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
 	fec->fecp = ioremap(res.start, resource_size(&res));
-	if (!fec->fecp)
+	if (!fec->fecp) {
+		ret = -ENOMEM;
 		goto out_fec;
+	}
 
 	if (get_bus_freq) {
 		clock = get_bus_freq(ofdev->dev.of_node);
@@ -172,8 +174,10 @@
 
 	new_bus->phy_mask = ~0;
 	new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-	if (!new_bus->irq)
+	if (!new_bus->irq) {
+		ret = -ENOMEM;
 		goto out_unmap_regs;
+	}
 
 	new_bus->parent = &ofdev->dev;
 	dev_set_drvdata(&ofdev->dev, new_bus);
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 0000000..1afb5ea
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT	1000
+
+struct tgec_mdio_controller {
+	__be32	reserved[12];
+	__be32	mdio_stat;	/* MDIO configuration and status */
+	__be32	mdio_ctl;	/* MDIO control */
+	__be32	mdio_data;	/* MDIO data */
+	__be32	mdio_addr;	/* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)	(((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY		(1 << 0)
+#define MDIO_STAT_RD_ER		(1 << 1)
+#define MDIO_CTL_DEV_ADDR(x)	(x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS	(1 << 10)
+#define MDIO_CTL_SCAN_EN	(1 << 11)
+#define MDIO_CTL_POST_INC	(1 << 14)
+#define MDIO_CTL_READ		(1 << 15)
+
+#define MDIO_DATA(x)		(x & 0xffff)
+#define MDIO_DATA_BSY		(1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the bus is free */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for bus to be free\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the MDIO operation is complete */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for operation to complete\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the port and dev addr */
+	out_be32(&regs->mdio_ctl,
+		 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Write the value to the register */
+	out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	uint32_t mdio_ctl;
+	uint16_t value;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the Port and Device Addrs */
+	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+	out_be32(&regs->mdio_ctl, mdio_ctl);
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Initiate the read */
+	out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Return all Fs if nothing was there */
+	if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+		dev_err(&bus->dev, "MDIO read error\n");
+		return 0xffff;
+	}
+
+	value = in_be32(&regs->mdio_data) & 0xffff;
+	dev_dbg(&bus->dev, "read %04x\n", value);
+
+	return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	int ret;
+
+	mutex_lock(&bus->mdio_lock);
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+
+	mutex_unlock(&bus->mdio_lock);
+
+	return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mii_bus *bus;
+	struct resource res;
+	int ret;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret) {
+		dev_err(&pdev->dev, "could not obtain address\n");
+		return ret;
+	}
+
+	bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "Freescale XGMAC MDIO Bus";
+	bus->read = xgmac_mdio_read;
+	bus->write = xgmac_mdio_write;
+	bus->reset = xgmac_mdio_reset;
+	bus->irq = bus->priv;
+	bus->parent = &pdev->dev;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+	/* Set the PHY base address */
+	bus->priv = of_iomap(np, 0);
+	if (!bus->priv) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ret = of_mdiobus_register(bus, np);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot register MDIO bus\n");
+		goto err_registration;
+	}
+
+	dev_set_drvdata(&pdev->dev, bus);
+
+	return 0;
+
+err_registration:
+	iounmap(bus->priv);
+
+err_ioremap:
+	mdiobus_free(bus);
+
+	return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+	mdiobus_unregister(bus);
+	iounmap(bus->priv);
+	mdiobus_free(bus);
+
+	return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+	{
+		.compatible = "fsl,fman-xmdio",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+	.driver = {
+		.name = "fsl-fman_xmdio",
+		.of_match_table = xgmac_mdio_match,
+	},
+	.probe = xgmac_mdio_probe,
+	.remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 736a7d9..9089d00 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -174,6 +174,20 @@
 
 	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
 			 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	/* MDI-X => 1; MDI => 0 */
+	if ((hw->media_type == e1000_media_type_copper) &&
+	    netif_carrier_ok(netdev))
+		ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+							ETH_TP_MDI_X :
+							ETH_TP_MDI);
+	else
+		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+	if (hw->mdix == AUTO_ALL_MODES)
+		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	else
+		ecmd->eth_tp_mdix_ctrl = hw->mdix;
 	return 0;
 }
 
@@ -183,6 +197,22 @@
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
+	/*
+	 * MDI setting is only allowed when autoneg enabled because
+	 * some hardware doesn't allow MDI setting when speed or
+	 * duplex is forced.
+	 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		if (hw->media_type != e1000_media_type_copper)
+			return -EOPNOTSUPP;
+
+		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+		    (ecmd->autoneg != AUTONEG_ENABLE)) {
+			e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+			return -EINVAL;
+		}
+	}
+
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 		msleep(1);
 
@@ -199,12 +229,21 @@
 		ecmd->advertising = hw->autoneg_advertised;
 	} else {
 		u32 speed = ethtool_cmd_speed(ecmd);
+		/* calling this overrides forced MDI setting */
 		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
 			clear_bit(__E1000_RESETTING, &adapter->flags);
 			return -EINVAL;
 		}
 	}
 
+	/* MDI-X => 2; MDI => 1; Auto => 3 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+			hw->mdix = AUTO_ALL_MODES;
+		else
+			hw->mdix = ecmd->eth_tp_mdix_ctrl;
+	}
+
 	/* reset the link */
 
 	if (netif_running(adapter->netdev)) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3bfbb8d..0ae2fcf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4939,6 +4939,10 @@
 	default:
 		goto err_inval;
 	}
+
+	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+	hw->mdix = AUTO_ALL_MODES;
+
 	return 0;
 
 err_inval:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade..080c890 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, eecd;
+	u32 ctrl, ctrl_ext, eecd, tctl;
 	s32 ret_val;
 
 	/*
@@ -1014,7 +1014,9 @@
 	ew32(IMC, 0xffffffff);
 
 	ew32(RCTL, 0);
-	ew32(TCTL, E1000_TCTL_PSP);
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
 	e1e_flush();
 
 	usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@
 			 * auto-negotiation in the TXCW register and disable
 			 * forced link in the Device Control register in an
 			 * attempt to auto-negotiate with our link partner.
-			 * If the partner code word is null, stop forcing
-			 * and restart auto negotiation.
 			 */
-			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
+			if (rxcw & E1000_RXCW_C) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
 				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 0349e24..2e76f06 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -199,6 +199,11 @@
 	else
 		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
+	if (hw->phy.mdix == AUTO_ALL_MODES)
+		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	else
+		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
 	return 0;
 }
 
@@ -241,6 +246,10 @@
 	default:
 		goto err_inval;
 	}
+
+	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+	adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
 	return 0;
 
 err_inval:
@@ -264,6 +273,22 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * MDI setting is only allowed when autoneg enabled because
+	 * some hardware doesn't allow MDI setting when speed or
+	 * duplex is forced.
+	 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		if (hw->phy.media_type != e1000_media_type_copper)
+			return -EOPNOTSUPP;
+
+		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+		    (ecmd->autoneg != AUTONEG_ENABLE)) {
+			e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+			return -EINVAL;
+		}
+	}
+
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
 
@@ -282,20 +307,32 @@
 			hw->fc.requested_mode = e1000_fc_default;
 	} else {
 		u32 speed = ethtool_cmd_speed(ecmd);
+		/* calling this overrides forced MDI setting */
 		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
 			clear_bit(__E1000_RESETTING, &adapter->state);
 			return -EINVAL;
 		}
 	}
 
+	/* MDI-X => 2; MDI => 1; Auto => 3 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		/*
+		 * fix up the value for auto (3 => 0) as zero is mapped
+		 * internally to auto
+		 */
+		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+			hw->phy.mdix = AUTO_ALL_MODES;
+		else
+			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+	}
+
 	/* reset the link */
 
 	if (netif_running(adapter->netdev)) {
 		e1000e_down(adapter);
 		e1000e_up(adapter);
-	} else {
+	} else
 		e1000e_reset(adapter);
-	}
 
 	clear_bit(__E1000_RESETTING, &adapter->state);
 	return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b2453..46c3b1f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@
 	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+				 struct e1000_buffer *bi)
+{
+	int i;
+	struct e1000_ps_page *ps_page;
+
+	for (i = 0; i < adapter->rx_ps_pages; i++) {
+		ps_page = &bi->ps_pages[i];
+
+		if (ps_page->page) {
+			pr_info("packet dump for ps_page %d:\n", i);
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, page_address(ps_page->page),
+				       PAGE_SIZE, true);
+		}
+	}
+}
+
 /*
  * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
@@ -299,10 +317,10 @@
 			(unsigned long long)buffer_info->time_stamp,
 			buffer_info->skb, next_desc);
 
-		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+		if (netif_msg_pktdata(adapter) && buffer_info->skb)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-				       16, 1, phys_to_virt(buffer_info->dma),
-				       buffer_info->length, true);
+				       16, 1, buffer_info->skb->data,
+				       buffer_info->skb->len, true);
 	}
 
 	/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@
 					buffer_info->skb, next_desc);
 
 				if (netif_msg_pktdata(adapter))
-					print_hex_dump(KERN_INFO, "",
-						DUMP_PREFIX_ADDRESS, 16, 1,
-						phys_to_virt(buffer_info->dma),
-						adapter->rx_ps_bsize0, true);
+					e1000e_dump_ps_pages(adapter,
+							     buffer_info);
 			}
 		}
 		break;
@@ -444,12 +460,12 @@
 					(unsigned long long)buffer_info->dma,
 					buffer_info->skb, next_desc);
 
-				if (netif_msg_pktdata(adapter))
+				if (netif_msg_pktdata(adapter) &&
+				    buffer_info->skb)
 					print_hex_dump(KERN_INFO, "",
 						       DUMP_PREFIX_ADDRESS, 16,
 						       1,
-						       phys_to_virt
-						       (buffer_info->dma),
+						       buffer_info->skb->data,
 						       adapter->rx_buffer_len,
 						       true);
 			}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b860d4f..fc62a3f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -84,8 +84,9 @@
 #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
 
 /* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82577 PHY Diagnostics Status */
 #define I82577_DSTATUS_CABLE_LENGTH       0x03FC
@@ -702,6 +703,32 @@
 	if (ret_val)
 		return ret_val;
 
+	/* Set MDI/MDIX mode */
+	ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		return ret_val;
+	phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+	/*
+	 * Options:
+	 *   0 - Auto (default)
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 */
+	switch (hw->phy.mdix) {
+	case 1:
+		break;
+	case 2:
+		phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+		break;
+	case 0:
+	default:
+		phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+		break;
+	}
+	ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		return ret_val;
+
 	return e1000_set_master_slave_mode(hw);
 }
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaa..ba994fb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
 
+	/*
+	 * Check for invalid size
+	 */
+	if ((hw->mac.type == e1000_82576) && (size > 15)) {
+		pr_notice("The NVM size is not valid, defaulting to 32K\n");
+		size = 15;
+	}
+
 	nvm->word_size = 1 << size;
 	if (hw->mac.type < e1000_i210) {
 		nvm->opcode_bits        = 8;
@@ -281,14 +289,6 @@
 	} else
 		nvm->type = e1000_nvm_flash_hw;
 
-	/*
-	 * Check for invalid size
-	 */
-	if ((hw->mac.type == e1000_82576) && (size > 15)) {
-		pr_notice("The NVM size is not valid, defaulting to 32K\n");
-		size = 15;
-	}
-
 	/* NVM Function Pointers */
 	switch (hw->mac.type) {
 	case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7be98b6..3404bc7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -464,6 +464,32 @@
 	phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
 
 	ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set MDI/MDIX mode */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+	/*
+	 * Options:
+	 *   0 - Auto (default)
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 */
+	switch (hw->phy.mdix) {
+	case 1:
+		break;
+	case 2:
+		phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+		break;
+	case 0:
+	default:
+		phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+		break;
+	}
+	ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
 
 out:
 	return ret_val;
@@ -2246,8 +2272,7 @@
 	if (ret_val)
 		goto out;
 
-	phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
-	phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
 
 	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
 	if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 34e4061..6ac3299 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -111,8 +111,9 @@
 #define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
 
 /* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82580 PHY Diagnostics Status */
 #define I82580_DSTATUS_CABLE_LENGTH       0x03FC
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88..28394be 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
 				    : (0x0E018 + ((_n) * 0x40)))
 #define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
 				    : (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n)	  ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+				      (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n)	E1000_RXCTL(_n)
+#define E1000_TXCTL(_n)   ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+				      (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
 #define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
 				    : (0x0E038 + ((_n) * 0x40)))
 #define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84c..be02168 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -198,6 +198,19 @@
 	}
 
 	ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	/* MDI-X => 2; MDI => 1; Invalid => 0 */
+	if (hw->phy.media_type == e1000_media_type_copper)
+		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+						      ETH_TP_MDI;
+	else
+		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+	if (hw->phy.mdix == AUTO_ALL_MODES)
+		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	else
+		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
 	return 0;
 }
 
@@ -209,11 +222,27 @@
 	/* When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed */
 	if (igb_check_reset_block(hw)) {
-		dev_err(&adapter->pdev->dev, "Cannot change link "
-			"characteristics when SoL/IDER is active.\n");
+		dev_err(&adapter->pdev->dev,
+			"Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
 
+	/*
+	 * MDI setting is only allowed when autoneg enabled because
+	 * some hardware doesn't allow MDI setting when speed or
+	 * duplex is forced.
+	 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		if (hw->phy.media_type != e1000_media_type_copper)
+			return -EOPNOTSUPP;
+
+		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+		    (ecmd->autoneg != AUTONEG_ENABLE)) {
+			dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+			return -EINVAL;
+		}
+	}
+
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
 		msleep(1);
 
@@ -227,12 +256,25 @@
 			hw->fc.requested_mode = e1000_fc_default;
 	} else {
 		u32 speed = ethtool_cmd_speed(ecmd);
+		/* calling this overrides forced MDI setting */
 		if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
 			clear_bit(__IGB_RESETTING, &adapter->state);
 			return -EINVAL;
 		}
 	}
 
+	/* MDI-X => 2; MDI => 1; Auto => 3 */
+	if (ecmd->eth_tp_mdix_ctrl) {
+		/*
+		 * fix up the value for auto (3 => 0) as zero is mapped
+		 * internally to auto
+		 */
+		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+			hw->phy.mdix = AUTO_ALL_MODES;
+		else
+			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+	}
+
 	/* reset the link */
 	if (netif_running(adapter->netdev)) {
 		igb_down(adapter);
@@ -1089,8 +1131,8 @@
 		wr32(reg, (_test[pat] & write));
 		val = rd32(reg) & mask;
 		if (val != (_test[pat] & write & mask)) {
-			dev_err(&adapter->pdev->dev, "pattern test reg %04X "
-				"failed: got 0x%08X expected 0x%08X\n",
+			dev_err(&adapter->pdev->dev,
+				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 				reg, val, (_test[pat] & write & mask));
 			*data = reg;
 			return 1;
@@ -1108,8 +1150,8 @@
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {
-		dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
-			" got 0x%08X expected 0x%08X\n", reg,
+		dev_err(&adapter->pdev->dev,
+			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
 			(val & mask), (write & mask));
 		*data = reg;
 		return 1;
@@ -1171,8 +1213,9 @@
 	wr32(E1000_STATUS, toggle);
 	after = rd32(E1000_STATUS) & toggle;
 	if (value != after) {
-		dev_err(&adapter->pdev->dev, "failed STATUS register test "
-			"got: 0x%08X expected: 0x%08X\n", after, value);
+		dev_err(&adapter->pdev->dev,
+			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+			after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1497,6 +1540,9 @@
 		break;
 	}
 
+	/* add small delay to avoid loopback test failure */
+	msleep(50);
+
 	/* force 1000, set loopback */
 	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
@@ -1777,16 +1823,14 @@
 	 * sessions are active */
 	if (igb_check_reset_block(&adapter->hw)) {
 		dev_err(&adapter->pdev->dev,
-			"Cannot do PHY loopback test "
-			"when SoL/IDER is active.\n");
+			"Cannot do PHY loopback test when SoL/IDER is active.\n");
 		*data = 0;
 		goto out;
 	}
 	if ((adapter->hw.mac.type == e1000_i210)
-		|| (adapter->hw.mac.type == e1000_i210)) {
+		|| (adapter->hw.mac.type == e1000_i211)) {
 		dev_err(&adapter->pdev->dev,
-			"Loopback test not supported "
-			"on this part at this time.\n");
+			"Loopback test not supported on this part at this time.\n");
 		*data = 0;
 		goto out;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b7c2d50..73cc273 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@
 				(u64)buffer_info->time_stamp,
 				buffer_info->skb, next_desc);
 
-			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			if (netif_msg_pktdata(adapter) && buffer_info->skb)
 				print_hex_dump(KERN_INFO, "",
 					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
+					16, 1, buffer_info->skb->data,
 					buffer_info->length, true);
 		}
 	}
@@ -547,18 +547,17 @@
 					(u64)buffer_info->dma,
 					buffer_info->skb, next_desc);
 
-				if (netif_msg_pktdata(adapter)) {
+				if (netif_msg_pktdata(adapter) &&
+				    buffer_info->dma && buffer_info->skb) {
 					print_hex_dump(KERN_INFO, "",
-						DUMP_PREFIX_ADDRESS,
-						16, 1,
-						phys_to_virt(buffer_info->dma),
-						IGB_RX_HDR_LEN, true);
+						  DUMP_PREFIX_ADDRESS,
+						  16, 1, buffer_info->skb->data,
+						  IGB_RX_HDR_LEN, true);
 					print_hex_dump(KERN_INFO, "",
 					  DUMP_PREFIX_ADDRESS,
 					  16, 1,
-					  phys_to_virt(
-					    buffer_info->page_dma +
-					    buffer_info->page_offset),
+					  page_address(buffer_info->page) +
+						      buffer_info->page_offset,
 					  PAGE_SIZE/2, true);
 				}
 			}
@@ -6676,6 +6675,10 @@
 	default:
 		goto err_inval;
 	}
+
+	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+	adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
 	return 0;
 
 err_inval:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b9623e9..bffcf1f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -78,6 +78,9 @@
 
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K    2048
+#define IXGBE_RXBUFFER_3K    3072
+#define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
@@ -104,6 +107,7 @@
 #define IXGBE_TX_FLAGS_FSO		(u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW		(u32)(1 << 7)
 #define IXGBE_TX_FLAGS_TSTAMP		(u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS		(u32)(1 << 9)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -293,16 +297,25 @@
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
  */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+					    IXGBE_RXBUFFER_3K;
+#endif
+	return IXGBE_RXBUFFER_2K;
+}
+
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-	return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
-}
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+#ifdef IXGBE_FCOE
+	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+		return (PAGE_SIZE < 8192) ? 1 : 0;
 #endif
+	return 0;
+}
 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
 
 struct ixgbe_ring_container {
 	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137..18bf08c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@
 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 		/* Set KX4/KX/KR support according to speed requested */
 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 			    (hw->phy.smart_speed_active == false))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
+		}
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4326f74..fa0d6e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1167,7 +1167,7 @@
 	}
 
 	bi->dma = dma;
-	bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+	bi->page_offset = 0;
 
 	return true;
 }
@@ -1320,29 +1320,6 @@
 		return max_len;
 }
 
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
-			      union ixgbe_adv_rx_desc *rx_desc,
-			      struct sk_buff *skb)
-{
-	__le32 rsc_enabled;
-	u32 rsc_cnt;
-
-	if (!ring_is_rsc_enabled(rx_ring))
-		return;
-
-	rsc_enabled = rx_desc->wb.lower.lo_dword.data &
-		      cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
-	/* If this is an RSC frame rsc_cnt should be non-zero */
-	if (!rsc_enabled)
-		return;
-
-	rsc_cnt = le32_to_cpu(rsc_enabled);
-	rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
-	IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
 				   struct sk_buff *skb)
 {
@@ -1440,16 +1417,28 @@
 
 	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
+	/* update RSC append count if present */
+	if (ring_is_rsc_enabled(rx_ring)) {
+		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+		if (unlikely(rsc_enabled)) {
+			u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+
+			/* update ntc based on RSC value */
+			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+			ntc &= IXGBE_RXDADV_NEXTP_MASK;
+			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+		}
+	}
+
+	/* if we are the last buffer then there is nothing else to do */
 	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
 		return false;
 
-	/* append_cnt indicates packet is RSC, if so fetch nextp */
-	if (IXGBE_CB(skb)->append_cnt) {
-		ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
-		ntc &= IXGBE_RXDADV_NEXTP_MASK;
-		ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
-	}
-
 	/* place skb in next buffer to be received */
 	rx_ring->rx_buffer_info[ntc].skb = skb;
 	rx_ring->rx_stats.non_eop_descs++;
@@ -1458,6 +1447,78 @@
 }
 
 /**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+			    struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	frag->page_offset += pull_len;
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	/* if the page was released unmap it, else just sync our portion */
+	if (unlikely(IXGBE_CB(skb)->page_released)) {
+		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		IXGBE_CB(skb)->page_released = false;
+	} else {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      frag->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+	IXGBE_CB(skb)->dma = 0;
+}
+
+/**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@
 				  union ixgbe_adv_rx_desc *rx_desc,
 				  struct sk_buff *skb)
 {
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 	struct net_device *netdev = rx_ring->netdev;
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* if the page was released unmap it, else just sync our portion */
-	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		IXGBE_CB(skb)->page_released = false;
-	} else {
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      IXGBE_CB(skb)->dma,
-					      frag->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
-					      DMA_FROM_DEVICE);
-	}
-	IXGBE_CB(skb)->dma = 0;
 
 	/* verify that the packet does not have any known errors */
 	if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@
 		return true;
 	}
 
-	/*
-	 * it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/*
-	 * we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = skb_frag_size(frag);
-	if (pull_len > IXGBE_RX_HDR_SIZE)
-		pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-
-	/*
-	 * if we sucked the frag empty then we should free it,
-	 * if there are other frags here something is screwed up in hardware
-	 */
-	if (skb_frag_size(frag) == 0) {
-		BUG_ON(skb_shinfo(skb)->nr_frags != 1);
-		skb_shinfo(skb)->nr_frags = 0;
-		__skb_frag_unref(frag);
-		skb->truesize -= ixgbe_rx_bufsz(rx_ring);
-	}
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb))
+		ixgbe_pull_tail(rx_ring, skb);
 
 #ifdef IXGBE_FCOE
 	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@
 }
 
 /**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-	struct page *page = rx_buffer->page;
-
-	/* if we are only owner of page and it is local we can reuse it */
-	return likely(page_count(page) == 1) &&
-	       likely(page_to_nid(page) == numa_node_id());
-}
-
-/**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *old_buff)
 {
 	struct ixgbe_rx_buffer *new_buff;
 	u16 nta = rx_ring->next_to_alloc;
-	u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
 	new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1597,17 +1594,13 @@
 	/* transfer page from old buffer to new buffer */
 	new_buff->page = old_buff->page;
 	new_buff->dma = old_buff->dma;
-
-	/* flip page offset to other buffer and store to new_buff */
-	new_buff->page_offset = old_buff->page_offset ^ bufsz;
+	new_buff->page_offset = old_buff->page_offset;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset, bufsz,
+					 new_buff->page_offset,
+					 ixgbe_rx_bufsz(rx_ring),
 					 DMA_FROM_DEVICE);
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(new_buff->page);
 }
 
 /**
@@ -1617,20 +1610,159 @@
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy, if the data in the buffer is
+ * less than the skb header size, or by attaching the page as a frag to
+ * the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
  **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 			      struct ixgbe_rx_buffer *rx_buffer,
-			      struct sk_buff *skb, int size)
+			      union ixgbe_adv_rx_desc *rx_desc,
+			      struct sk_buff *skb)
 {
-	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-			   rx_buffer->page, rx_buffer->page_offset,
-			   size);
-	skb->len += size;
-	skb->data_len += size;
-	skb->truesize += ixgbe_rx_bufsz(rx_ring);
+	struct page *page = rx_buffer->page;
+	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+				   ixgbe_rx_bufsz(rx_ring);
+#endif
+
+	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* we can reuse buffer as-is, just make sure it is local */
+		if (likely(page_to_nid(page) == numa_node_id()))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		put_page(page);
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* avoid re-using remote pages */
+	if (unlikely(page_to_nid(page) != numa_node_id()))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= truesize;
+
+	/*
+	 * since we are the only owner of the page and we need to
+	 * increment it, just set the value to 2 in order to avoid
+	 * an unnecessary locked operation
+	 */
+	atomic_set(&page->_count, 2);
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > last_offset)
+		return false;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+#endif
+
+	return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+					     union ixgbe_adv_rx_desc *rx_desc)
+{
+	struct ixgbe_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	skb = rx_buffer->skb;
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) +
+				  rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						IXGBE_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return NULL;
+		}
+
+		/*
+		 * we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+
+		/*
+		 * Delay unmapping of the first packet. It carries the
+		 * header information; HW may still access the header
+		 * after the writeback.  Only unmap it when EOP is
+		 * reached.
+		 */
+		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+			goto dma_sync;
+
+		IXGBE_CB(skb)->dma = rx_buffer->dma;
+	} else {
+		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+			ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+		/* we are reusing so sync this buffer for CPU use */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+
+	/* pull page into skb */
+	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+		/* the page has been released from the ring */
+		IXGBE_CB(skb)->page_released = true;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+			       ixgbe_rx_pg_size(rx_ring),
+			       DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->skb = NULL;
+	rx_buffer->dma = 0;
+	rx_buffer->page = NULL;
+
+	return skb;
 }
 
 /**
@@ -1658,11 +1790,8 @@
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	do {
-		struct ixgbe_rx_buffer *rx_buffer;
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct sk_buff *skb;
-		struct page *page;
-		u16 ntc;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1799,7 @@
 			cleaned_count = 0;
 		}
 
-		ntc = rx_ring->next_to_clean;
-		rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-		rx_buffer = &rx_ring->rx_buffer_info[ntc];
+		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
 		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 			break;
@@ -1684,75 +1811,12 @@
 		 */
 		rmb();
 
-		page = rx_buffer->page;
-		prefetchw(page);
+		/* retrieve a buffer from the ring */
+		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-		skb = rx_buffer->skb;
-
-		if (likely(!skb)) {
-			void *page_addr = page_address(page) +
-					  rx_buffer->page_offset;
-
-			/* prefetch first cache line of first page */
-			prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-			prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
-			/* allocate a skb to store the frags */
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							IXGBE_RX_HDR_SIZE);
-			if (unlikely(!skb)) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				break;
-			}
-
-			/*
-			 * we will be copying header into skb->data in
-			 * pskb_may_pull so it is in our interest to prefetch
-			 * it now to avoid a possible cache miss
-			 */
-			prefetchw(skb->data);
-
-			/*
-			 * Delay unmapping of the first packet. It carries the
-			 * header information, HW may still access the header
-			 * after the writeback.  Only unmap it when EOP is
-			 * reached
-			 */
-			IXGBE_CB(skb)->dma = rx_buffer->dma;
-		} else {
-			/* we are reusing so sync this buffer for CPU use */
-			dma_sync_single_range_for_cpu(rx_ring->dev,
-						      rx_buffer->dma,
-						      rx_buffer->page_offset,
-						      ixgbe_rx_bufsz(rx_ring),
-						      DMA_FROM_DEVICE);
-		}
-
-		/* pull page into skb */
-		ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-				  le16_to_cpu(rx_desc->wb.upper.length));
-
-		if (ixgbe_can_reuse_page(rx_buffer)) {
-			/* hand second half of page back to the ring */
-			ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-		} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-			/* the page has been released from the ring */
-			IXGBE_CB(skb)->page_released = true;
-		} else {
-			/* we are not reusing the buffer so unmap it */
-			dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-				       ixgbe_rx_pg_size(rx_ring),
-				       DMA_FROM_DEVICE);
-		}
-
-		/* clear contents of buffer_info */
-		rx_buffer->skb = NULL;
-		rx_buffer->dma = 0;
-		rx_buffer->page = NULL;
-
-		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
 
 		cleaned_count++;
 
@@ -2868,11 +2932,7 @@
 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
 	/* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
-	srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
 	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
 
 	/* configure descriptor type */
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@
 	 * total size of max desc * buf_len is not greater
 	 * than 65536
 	 */
-#if (PAGE_SIZE <= 8192)
 	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
-	rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
-	rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
@@ -4130,27 +4184,6 @@
 }
 
 /**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set.  To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-	struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i += 2) {
-		rx_buffer[0].page_offset = 0;
-		rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-		rx_buffer = &rx_buffer[2];
-	}
-}
-
-/**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
  **/
@@ -4195,8 +4228,6 @@
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -4646,8 +4677,6 @@
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	return 0;
 err:
 	vfree(rx_ring->rx_buffer_info);
@@ -5874,9 +5903,12 @@
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-		    !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-			return;
+		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+			if (unlikely(skb->no_fcs))
+				first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+			if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+				return;
+		}
 	} else {
 		u8 l4_hdr = 0;
 		switch (first->protocol) {
@@ -5938,7 +5970,6 @@
 {
 	/* set type for advanced descriptor with frame checksum insertion */
 	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-				      IXGBE_ADVTXD_DCMD_IFCS |
 				      IXGBE_ADVTXD_DCMD_DEXT);
 
 	/* set HW vlan bit if vlan is present */
@@ -5958,6 +5989,10 @@
 #endif
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
+	/* insert frame checksum */
+	if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
 	return cmd_type;
 }
 
@@ -6063,8 +6098,6 @@
 		if (likely(!data_len))
 			break;
 
-		if (unlikely(skb->no_fcs))
-			cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
 		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
 		i++;
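
The new ixgbe_add_rx_frag()/ixgbe_fetch_rx_buffer() pair above replaces the
old per-ring page_offset interleaving with per-buffer reuse decisions: on
PAGE_SIZE < 8192 each page is split into two receive buffers, page_offset is
XORed with the buffer size to flip between the halves, and the page refcount
is pinned at 2 (one reference for the stack, one for the ring) to avoid an
atomic increment.  Below is a minimal userspace sketch of that small-page
path only; the structure and helper names are invented for the example and
the refcount is modelled with a plain integer.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_BUF_SIZE	2048u	/* ixgbe_rx_bufsz() when PAGE_SIZE < 8192 */

struct model_rx_buffer {
	unsigned int page_offset;	/* which half of the 4 KiB page is armed */
	int page_refs;			/* stands in for page_count(page) */
};

/* Mirrors the return value of ixgbe_add_rx_frag(): true means the other
 * half of the page can be handed back to the ring for reuse. */
static bool model_add_rx_frag(struct model_rx_buffer *b, bool page_is_local)
{
	if (!page_is_local)
		return false;			/* avoid re-using remote pages */
	if (b->page_refs != 1)
		return false;			/* someone else still holds a reference */
	b->page_offset ^= MODEL_BUF_SIZE;	/* flip to the other half */
	b->page_refs = 2;			/* one ref for the stack, one for the ring */
	return true;
}

int main(void)
{
	struct model_rx_buffer b = { .page_offset = 0, .page_refs = 1 };

	for (int i = 0; i < 4; i++) {
		bool reuse = model_add_rx_frag(&b, true);
		printf("rx %d: offset=%u reuse=%d\n", i, b.page_offset, reuse);
		b.page_refs = 1;	/* pretend the stack dropped its reference */
	}
	return 0;
}
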
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e703..5aba5ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@
 		/* If source MAC is equal to our own MAC and not performing
 		 * the selftest or flb disabled - drop the packet */
 		if (s_mac == priv->mac &&
-			(!(dev->features & NETIF_F_LOOPBACK) ||
-			 !priv->validate_loopback))
+		    !((dev->features & NETIF_F_LOOPBACK) ||
+		      priv->validate_loopback))
 			goto next;
 
 		/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856..10bba09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@
 	ring->cons = 0xffffffff;
 	ring->last_nr_txbb = 1;
 	ring->poll_cnt = 0;
-	ring->blocked = 0;
 	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
 	memset(ring->buf, 0, ring->buf_size);
 
@@ -365,14 +364,13 @@
 	ring->cons += txbbs_skipped;
 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
-	/* Wakeup Tx queue if this ring stopped it */
-	if (unlikely(ring->blocked)) {
-		if ((u32) (ring->prod - ring->cons) <=
-		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
-			ring->blocked = 0;
-			netif_tx_wake_queue(ring->tx_queue);
-			priv->port_stats.wake_queue++;
-		}
+	/*
+	 * Wake up the Tx queue if it was stopped and at least one
+	 * packet was completed
+	 */
+	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+		netif_tx_wake_queue(ring->tx_queue);
+		priv->port_stats.wake_queue++;
 	}
 }
 
@@ -592,7 +590,6 @@
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
 		netif_tx_stop_queue(ring->tx_queue);
-		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
 		return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 88b7b3e7..daf4179 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -358,13 +358,14 @@
 }
 
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			u64 virt, int obj_size,	int nobj, int reserved,
+			u64 virt, int obj_size,	u32 nobj, int reserved,
 			int use_lowmem, int use_coherent)
 {
 	int obj_per_chunk;
 	int num_icm;
 	unsigned chunk_size;
 	int i;
+	u64 size;
 
 	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
 	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
@@ -380,10 +381,12 @@
 	table->coherent = use_coherent;
 	mutex_init(&table->mutex);
 
+	size = (u64) nobj * obj_size;
 	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
 		chunk_size = MLX4_TABLE_CHUNK_SIZE;
-		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
-			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
+			chunk_size = PAGE_ALIGN(size -
+					i * MLX4_TABLE_CHUNK_SIZE);
 
 		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
 					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
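
The icm.c change above widens nobj to u32 and computes the table size as
(u64) nobj * obj_size before the chunk loop, so the product can no longer
wrap in 32-bit arithmetic.  A small standalone sketch of the difference,
with purely illustrative numbers (not taken from any particular HCA
configuration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nobj = 1u << 21;	/* illustrative object count */
	uint32_t obj_size = 4096;	/* illustrative object size in bytes */

	/* 32-bit product wraps: 2^21 * 2^12 = 2^33 does not fit */
	uint32_t wrapped = nobj * obj_size;
	/* widening one operand first, as the patch does, keeps the value */
	uint64_t size = (uint64_t)nobj * obj_size;

	printf("32-bit product: %u\n", (unsigned)wrapped);
	printf("64-bit product: %llu\n", (unsigned long long)size);
	return 0;
}
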
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 19e4efc..a67744f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -78,7 +78,7 @@
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			  int start, int end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			u64 virt, int obj_size,	int nobj, int reserved,
+			u64 virt, int obj_size,	u32 nobj, int reserved,
 			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
 void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 48d0e901..827b72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@
 					 "on this HCA, aborting.\n");
 				return -EINVAL;
 			}
-			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
-			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
-				return -EINVAL;
 		}
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4ec3835..a018ea2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -432,8 +432,10 @@
 			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
 				/* Entry already exists, add to duplicates */
 				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-				if (!dqp)
+				if (!dqp) {
+					err = -ENOMEM;
 					goto out_mailbox;
+				}
 				dqp->qpn = qpn;
 				list_add_tail(&dqp->list, &entry->duplicates);
 				found = true;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 59ebc03..4d9df8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -249,7 +249,7 @@
 struct mlx4_buddy {
 	unsigned long	      **bits;
 	unsigned int	       *num_free;
-	int			max_order;
+	u32			max_order;
 	spinlock_t		lock;
 };
 
@@ -258,7 +258,7 @@
 struct mlx4_icm_table {
 	u64			virt;
 	int			num_icm;
-	int			num_obj;
+	u32			num_obj;
 	int			obj_size;
 	int			lowmem;
 	int			coherent;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab10..9d27e42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@
 	u32 doorbell_qpn;
 	void *buf;
 	u16 poll_cnt;
-	int blocked;
 	struct mlx4_en_tx_info *tx_info;
 	u8 *bounce_buf;
 	u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index af55b7c..c202d3a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -37,6 +37,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/vmalloc.h>
 
 #include <linux/mlx4/cmd.h>
 
@@ -120,7 +121,7 @@
 	buddy->max_order = max_order;
 	spin_lock_init(&buddy->lock);
 
-	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
 			      GFP_KERNEL);
 	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
 				  GFP_KERNEL);
@@ -129,10 +130,12 @@
 
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
-		if (!buddy->bits[i])
-			goto err_out_free;
-		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
+		if (!buddy->bits[i]) {
+			buddy->bits[i] = vzalloc(s * sizeof(long));
+			if (!buddy->bits[i])
+				goto err_out_free;
+		}
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
@@ -142,7 +145,10 @@
 
 err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
+			vfree(buddy->bits[i]);
+		else
+			kfree(buddy->bits[i]);
 
 err_out:
 	kfree(buddy->bits);
@@ -156,7 +162,10 @@
 	int i;
 
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		if (is_vmalloc_addr(buddy->bits[i]))
+			vfree(buddy->bits[i]);
+		else
+			kfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
 	kfree(buddy->num_free);
@@ -668,7 +677,7 @@
 		return err;
 
 	err = mlx4_buddy_init(&mr_table->mtt_buddy,
-			      ilog2(dev->caps.num_mtts /
+			      ilog2((u32)dev->caps.num_mtts /
 			      (1 << log_mtts_per_seg)));
 	if (err)
 		goto err_buddy;
@@ -678,7 +687,7 @@
 			mlx4_alloc_mtt_range(dev,
 					     fls(dev->caps.reserved_mtts - 1));
 		if (priv->reserved_mtts < 0) {
-			mlx4_warn(dev, "MTT table of order %d is too small.\n",
+			mlx4_warn(dev, "MTT table of order %u is too small.\n",
 				  mr_table->mtt_buddy.max_order);
 			err = -ENOMEM;
 			goto err_reserve_mtts;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 9ee4725..8e0c3cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -76,7 +76,7 @@
 		u64 size;
 		u64 start;
 		int type;
-		int num;
+		u32 num;
 		int log_num;
 	};
 
@@ -105,7 +105,7 @@
 	si_meminfo(&si);
 	request->num_mtt =
 		roundup_pow_of_two(max_t(unsigned, request->num_mtt,
-					 min(1UL << 31,
+					 min(1UL << (31 - log_mtts_per_seg),
 					     si.totalram >> (log_mtts_per_seg - 1))));
 
 	profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz;
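
The profile.c change above lowers the clamp on the requested MTT count from
1UL << 31 to 1UL << (31 - log_mtts_per_seg), which appears intended to keep
the eventual entry count within 2^31 once the per-segment factor is applied
(matching the switch of num_obj/num_mtts handling to u32 elsewhere in this
series).  A tiny arithmetic sketch under that assumption, using 3 as an
assumed value for log_mtts_per_seg:

#include <stdio.h>

int main(void)
{
	int log_mtts_per_seg = 3;	/* assumed module parameter value */

	unsigned long old_cap = 1UL << 31;
	unsigned long new_cap = 1UL << (31 - log_mtts_per_seg);

	/* scaling the new cap by entries-per-segment lands exactly on 2^31 */
	printf("old cap %lu, new cap %lu, new cap scaled %lu\n",
	       old_cap, new_cap, new_cap << log_mtts_per_seg);
	return 0;
}
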
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 8024982..34ee09b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@
 	}
 
 	/*
-	 * Adjust port configuration:
-	 * If port 1 sensed nothing and port 2 is IB, set both as IB
-	 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
-	 */
-	if (stype[0] == MLX4_PORT_TYPE_ETH) {
-		for (i = 1; i < dev->caps.num_ports; i++)
-			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
-	}
-	if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
-		for (i = 0; i < dev->caps.num_ports - 1; i++)
-			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
-	}
-
-	/*
 	 * If sensed nothing, remain in current configuration.
 	 */
 	for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4069eda..53743f7 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -346,28 +346,15 @@
 						   "phy-mode", NULL);
 		if (mode && !strcmp(mode, "mii"))
 			return PHY_INTERFACE_MODE_MII;
-		return PHY_INTERFACE_MODE_RMII;
 	}
-
-	/* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
-	return PHY_INTERFACE_MODE_MII;
-#else
 	return PHY_INTERFACE_MODE_RMII;
-#endif
 }
 
 static bool use_iram_for_net(struct device *dev)
 {
 	if (dev && dev->of_node)
 		return of_property_read_bool(dev->of_node, "use-iram");
-
-	/* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
-	return true;
-#else
 	return false;
-#endif
 }
 
 /* Receive Status information word */
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 46df3a0..24c2305 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -8,7 +8,7 @@
 		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
 		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
 		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
-		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
+		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779)
 	select CRC32
 	select NET_CORE
 	select MII
@@ -18,4 +18,4 @@
 	  Renesas SuperH Ethernet device driver.
 	  This driver supporting CPUs are:
 		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
-		  and R8A7740.
+		  R8A7740 and R8A7779.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index af0b867..bad8f2e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -78,7 +78,7 @@
 #endif
 
 /* There is CPU dependent code */
-#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
 #define SH_ETH_RESET_DEFAULT	1
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
@@ -93,13 +93,18 @@
 static void sh_eth_set_rate(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned int bits = ECMR_RTM;
+
+#if defined(CONFIG_ARCH_R8A7779)
+	bits |= ECMR_ELB;
+#endif
 
 	switch (mdp->speed) {
 	case 10: /* 10BASE */
-		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
 		break;
 	case 100:/* 100BASE */
-		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
 		break;
 	default:
 		break;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1..a606db4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -202,11 +202,21 @@
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if ((efx->state == STATE_RUNNING) ||	\
+		if ((efx->state == STATE_READY) ||	\
 		    (efx->state == STATE_DISABLED))	\
 			ASSERT_RTNL();			\
 	} while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+	if (efx->state == STATE_DISABLED) {
+		netif_err(efx, drv, efx->net_dev,
+			  "device is disabled due to earlier errors\n");
+		return -EIO;
+	}
+	return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@
 	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
 					 sizeof(struct efx_rx_page_state));
 
+	/* We must keep at least one descriptor in a TX ring empty.
+	 * We could avoid this when the queue size does not exactly
+	 * match the hardware ring size, but it's not that important.
+	 * Therefore we stop the queue when one more skb might fill
+	 * the ring completely.  We wake it when half way back to
+	 * the ring completely.  We wake it when it is halfway back to
+	 */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -730,7 +750,11 @@
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
 	u32 old_rxq_entries, old_txq_entries;
 	unsigned i, next_buffer_table = 0;
-	int rc = 0;
+	int rc;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 
 	/* Not all channels should be reallocated. We must avoid
 	 * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@
 {
 	struct efx_channel *channel;
 
+	BUG_ON(efx->state == STATE_DISABLED);
+
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@
 {
 	struct efx_channel *channel;
 
+	if (efx->state == STATE_DISABLED)
+		return;
+
 	efx_mcdi_mode_poll(efx);
 
 	efx_nic_disable_interrupts(efx);
@@ -1503,6 +1532,11 @@
 		goto fail2;
 	}
 
+	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+		rc = -EINVAL;
+		goto fail3;
+	}
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
 	rc = efx_probe_filters(efx);
@@ -1528,22 +1562,21 @@
 	return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
+	BUG_ON(efx->state == STATE_DISABLED);
 
 	/* Check that it is appropriate to restart the interface. All
 	 * of these flags are safe to read under just the rtnl lock */
-	if (efx->port_enabled)
-		return;
-	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-		return;
-	if (!netif_running(efx->net_dev))
+	if (efx->port_enabled || !netif_running(efx->net_dev))
 		return;
 
 	efx_start_port(efx);
@@ -1577,11 +1610,11 @@
 	cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1734,8 +1767,6 @@
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct mii_ioctl_data *data = if_mii(ifr);
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	/* Convert phy_id from older PRTAD/DEVAD format */
 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
 	    (data->phy_id & 0xfc00) == 0x0400)
@@ -1815,13 +1846,14 @@
 static int efx_net_open(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	EFX_ASSERT_RESET_SERIALISED(efx);
+	int rc;
 
 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state == STATE_DISABLED)
-		return -EIO;
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 	if (efx->phy_mode & PHY_MODE_SPECIAL)
 		return -EBUSY;
 	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1847,10 +1879,8 @@
 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state != STATE_DISABLED) {
-		/* Stop the device and flush all the channels */
-		efx_stop_all(efx);
-	}
+	/* Stop the device and flush all the channels */
+	efx_stop_all(efx);
 
 	return 0;
 }
@@ -1910,9 +1940,11 @@
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
 
@@ -1921,8 +1953,6 @@
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
 	mutex_lock(&efx->mac_lock);
-	/* Reconfigure the MAC before enabling the dma queues so that
-	 * the RX buffers don't overflow */
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
@@ -1937,8 +1967,6 @@
 	struct sockaddr *addr = data;
 	char *new_addr = addr->sa_data;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	if (!is_valid_ether_addr(new_addr)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "invalid ethernet MAC address requested: %pM\n",
@@ -2070,14 +2098,31 @@
 	net_dev->irq = efx->pci_dev->irq;
 	net_dev->netdev_ops = &efx_netdev_ops;
 	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
 	rtnl_lock();
 
+	/* Enable resets to be scheduled and check whether any were
+	 * already requested.  If so, the NIC is probably hosed so we
+	 * abort.
+	 */
+	efx->state = STATE_READY;
+	smp_mb(); /* ensure we change state before checking reset_pending */
+	if (efx->reset_pending) {
+		netif_err(efx, probe, efx->net_dev,
+			  "aborting probe due to scheduled reset\n");
+		rc = -EIO;
+		goto fail_locked;
+	}
+
 	rc = dev_alloc_name(net_dev, net_dev->name);
 	if (rc < 0)
 		goto fail_locked;
 	efx_update_name(efx);
 
+	/* Always start with carrier off; PHY events will detect the link */
+	netif_carrier_off(net_dev);
+
 	rc = register_netdevice(net_dev);
 	if (rc)
 		goto fail_locked;
@@ -2088,9 +2133,6 @@
 			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
-	/* Always start with carrier off; PHY events will detect the link */
-	netif_carrier_off(net_dev);
-
 	rtnl_unlock();
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2102,14 +2144,14 @@
 
 	return 0;
 
+fail_registered:
+	rtnl_lock();
+	unregister_netdevice(net_dev);
 fail_locked:
+	efx->state = STATE_UNINIT;
 	rtnl_unlock();
 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
 	return rc;
-
-fail_registered:
-	unregister_netdev(net_dev);
-	return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2132,7 +2174,11 @@
 
 	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-	unregister_netdev(efx->net_dev);
+
+	rtnl_lock();
+	unregister_netdevice(efx->net_dev);
+	efx->state = STATE_UNINIT;
+	rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2148,9 +2194,9 @@
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_stop_all(efx);
-	mutex_lock(&efx->mac_lock);
-
 	efx_stop_interrupts(efx, false);
+
+	mutex_lock(&efx->mac_lock);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 		efx->phy_op->fini(efx);
 	efx->type->fini(efx);
@@ -2270,16 +2316,15 @@
 	if (!pending)
 		return;
 
-	/* If we're not RUNNING then don't reset. Leave the reset_pending
-	 * flags set so that efx_pci_probe_main will be retried */
-	if (efx->state != STATE_RUNNING) {
-		netif_info(efx, drv, efx->net_dev,
-			   "scheduled reset quenched. NIC not RUNNING\n");
-		return;
-	}
-
 	rtnl_lock();
-	(void)efx_reset(efx, fls(pending) - 1);
+
+	/* We checked the state in efx_schedule_reset() but it may
+	 * have changed by now.  Now that we have the RTNL lock,
+	 * it cannot change again.
+	 */
+	if (efx->state == STATE_READY)
+		(void)efx_reset(efx, fls(pending) - 1);
+
 	rtnl_unlock();
 }
 
@@ -2305,6 +2350,13 @@
 	}
 
 	set_bit(method, &efx->reset_pending);
+	smp_mb(); /* ensure we change reset_pending before checking state */
+
+	/* If we're not READY then just leave the flags set as the cue
+	 * to abort probing or reschedule the reset later.
+	 */
+	if (ACCESS_ONCE(efx->state) != STATE_READY)
+		return;
 
 	/* efx_process_channel() will no longer read events once a
 	 * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2370,13 +2422,12 @@
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
 			   struct pci_dev *pci_dev, struct net_device *net_dev)
 {
 	int i;
 
 	/* Initialise common structures */
-	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
@@ -2386,7 +2437,7 @@
 	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
 	efx->pci_dev = pci_dev;
 	efx->msg_enable = debug;
-	efx->state = STATE_INIT;
+	efx->state = STATE_UNINIT;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
 	efx->net_dev = net_dev;
@@ -2403,8 +2454,6 @@
 			goto fail;
 	}
 
-	efx->type = type;
-
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2449,6 +2498,12 @@
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+	/* Flush reset_work. It can no longer be scheduled since we
+	 * are not READY.
+	 */
+	BUG_ON(efx->state == STATE_READY);
+	cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
 	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 	efx->net_dev->rx_cpu_rmap = NULL;
@@ -2474,24 +2529,15 @@
 
 	/* Mark the NIC as fini, then stop the interface */
 	rtnl_lock();
-	efx->state = STATE_FINI;
 	dev_close(efx->net_dev);
-
-	/* Allow any queued efx_resets() to complete */
+	efx_stop_interrupts(efx, false);
 	rtnl_unlock();
 
-	efx_stop_interrupts(efx, false);
 	efx_sriov_fini(efx);
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
 
-	/* Wait for any scheduled resets to complete. No more will be
-	 * scheduled from this point because efx_stop_all() has been
-	 * called, we are no longer registered with driverlink, and
-	 * the net_device's have been removed. */
-	cancel_work_sync(&efx->reset_work);
-
 	efx_pci_remove_main(efx);
 
 	efx_fini_io(efx);
@@ -2611,7 +2657,6 @@
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   const struct pci_device_id *entry)
 {
-	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 	struct net_device *net_dev;
 	struct efx_nic *efx;
 	int rc;
@@ -2621,10 +2666,12 @@
 				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= (type->offload_features | NETIF_F_SG |
+	efx = netdev_priv(net_dev);
+	efx->type = (const struct efx_nic_type *) entry->driver_data;
+	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO |
 			      NETIF_F_RXCSUM);
-	if (type->offload_features & NETIF_F_V6_CSUM)
+	if (efx->type->offload_features & NETIF_F_V6_CSUM)
 		net_dev->features |= NETIF_F_TSO6;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2632,10 +2679,9 @@
 				   NETIF_F_RXCSUM);
 	/* All offloads can be toggled */
 	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-	rc = efx_init_struct(efx, type, pci_dev, net_dev);
+	rc = efx_init_struct(efx, pci_dev, net_dev);
 	if (rc)
 		goto fail1;
 
@@ -2650,28 +2696,9 @@
 		goto fail2;
 
 	rc = efx_pci_probe_main(efx);
-
-	/* Serialise against efx_reset(). No more resets will be
-	 * scheduled since efx_stop_all() has been called, and we have
-	 * not and never have been registered.
-	 */
-	cancel_work_sync(&efx->reset_work);
-
 	if (rc)
 		goto fail3;
 
-	/* If there was a scheduled reset during probe, the NIC is
-	 * probably hosed anyway.
-	 */
-	if (efx->reset_pending) {
-		rc = -EIO;
-		goto fail4;
-	}
-
-	/* Switch to the running state before we expose the device to the OS,
-	 * so that dev_open()|efx_start_all() will actually start the device */
-	efx->state = STATE_RUNNING;
-
 	rc = efx_register_netdev(efx);
 	if (rc)
 		goto fail4;
@@ -2711,12 +2738,18 @@
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_FINI;
+	rtnl_lock();
 
-	netif_device_detach(efx->net_dev);
+	if (efx->state != STATE_DISABLED) {
+		efx->state = STATE_UNINIT;
 
-	efx_stop_all(efx);
-	efx_stop_interrupts(efx, false);
+		netif_device_detach(efx->net_dev);
+
+		efx_stop_all(efx);
+		efx_stop_interrupts(efx, false);
+	}
+
+	rtnl_unlock();
 
 	return 0;
 }
@@ -2725,21 +2758,25 @@
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_INIT;
+	rtnl_lock();
 
-	efx_start_interrupts(efx, false);
+	if (efx->state != STATE_DISABLED) {
+		efx_start_interrupts(efx, false);
 
-	mutex_lock(&efx->mac_lock);
-	efx->phy_op->reconfigure(efx);
-	mutex_unlock(&efx->mac_lock);
+		mutex_lock(&efx->mac_lock);
+		efx->phy_op->reconfigure(efx);
+		mutex_unlock(&efx->mac_lock);
 
-	efx_start_all(efx);
+		efx_start_all(efx);
 
-	netif_device_attach(efx->net_dev);
+		netif_device_attach(efx->net_dev);
 
-	efx->state = STATE_RUNNING;
+		efx->state = STATE_READY;
 
-	efx->type->resume_wol(efx);
+		efx->type->resume_wol(efx);
+	}
+
+	rtnl_unlock();
 
 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
 	queue_work(reset_workqueue, &efx->reset_work);
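
The efx.c changes above pair two store-then-check sequences around probe:
efx_register_netdev() sets state to STATE_READY, issues smp_mb() and then
checks reset_pending, while efx_schedule_reset() sets a reset_pending bit,
issues smp_mb() and then checks state.  With a full barrier on both sides,
at least one of the two paths is guaranteed to see the other's store, so a
reset requested during probe is either acted on or aborts the probe; it
cannot be silently lost.  A minimal single-threaded userspace model of the
same handshake, using C11 atomics and invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int state;		/* 0 = UNINIT, 1 = READY */
static atomic_ulong reset_pending;	/* bitmask of requested resets */

/* Models efx_register_netdev(): publish READY, then look for requests. */
static bool model_register(void)
{
	atomic_store_explicit(&state, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&reset_pending, memory_order_relaxed)) {
		printf("register: aborting, reset already requested\n");
		return false;
	}
	return true;
}

/* Models efx_schedule_reset(): record the request, then check state. */
static void model_schedule_reset(unsigned long method_bit)
{
	atomic_fetch_or_explicit(&reset_pending, method_bit,
				 memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&state, memory_order_relaxed) != 1) {
		printf("schedule_reset: not READY, leaving flag as a cue\n");
		return;
	}
	printf("schedule_reset: queueing reset work\n");
}

int main(void)
{
	model_schedule_reset(1UL << 0);	/* request arrives before READY */
	return model_register() ? 0 : 1;
}
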
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f915..70755c9 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@
 #define EFX_MAX_EVQ_SIZE 16384UL
 #define EFX_MIN_EVQ_SIZE 512UL
 
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS	100
+
+/* The smallest [rt]xq_entries that the driver supports.  RX minimum
+ * is a bit arbitrary.  For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT		128U
+#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
 extern int efx_probe_filters(struct efx_nic *efx);
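
EFX_TXQ_MIN_ENT(efx) above is twice the worst-case descriptor count for a
single TSO skb, which efx_tx_max_skb_descs() computes in the tx.c hunk
further down.  The sketch below only reproduces that arithmetic for one
plausible configuration (MAX_SKB_FRAGS of 17 with 4 KiB pages, no Falcon/A1
alignment workaround, PAGE_SIZE equal to EFX_PAGE_SIZE); the real values
depend on the kernel configuration and NIC revision.

#include <stdio.h>

int main(void)
{
	unsigned int tso_max_segs = 100;	/* EFX_TSO_MAX_SEGS */
	unsigned int max_skb_frags = 17;	/* typical with 4 KiB pages */

	/* header + payload descriptor per output segment, plus one per
	 * input fragment boundary within a segment */
	unsigned int max_descs = tso_max_segs * 2 + max_skb_frags;

	/* assumes neither the alignment workaround nor extra PCIe
	 * page-boundary descriptors apply */
	unsigned int txq_min_ent = 2 * max_descs;

	printf("max descriptors per TSO skb: %u\n", max_descs);	/* 217 */
	printf("minimum TX ring entries:    %u\n", txq_min_ent);	/* 434 */
	return 0;
}
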
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f9..2bd5c2d 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -529,9 +529,7 @@
 	if (!efx_tests)
 		goto fail;
 
-
-	ASSERT_RTNL();
-	if (efx->state != STATE_RUNNING) {
+	if (efx->state != STATE_READY) {
 		rc = -EIO;
 		goto fail1;
 	}
@@ -680,21 +678,27 @@
 				     struct ethtool_ringparam *ring)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 txq_entries;
 
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
 	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
 		return -EINVAL;
 
-	if (ring->rx_pending < EFX_MIN_RING_SIZE ||
-	    ring->tx_pending < EFX_MIN_RING_SIZE) {
+	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
 		netif_err(efx, drv, efx->net_dev,
-			  "TX and RX queues cannot be smaller than %ld\n",
-			  EFX_MIN_RING_SIZE);
+			  "RX queues cannot be smaller than %u\n",
+			  EFX_RXQ_MIN_ENT);
 		return -EINVAL;
 	}
 
-	return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+	if (txq_entries != ring->tx_pending)
+		netif_warn(efx, drv, efx->net_dev,
+			   "increasing TX queue size to minimum of %u\n",
+			   txq_entries);
+
+	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 8687a6c..ec1e99d 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -380,7 +380,7 @@
 		new_mode = PHY_MODE_SPECIAL;
 	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
 		err = 0;
-	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+	} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
 		err = -EBUSY;
 	} else {
 		/* Reset the PHY, reconfigure the MAC and enable/disable
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a9..7ab1232 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,29 +91,31 @@
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *	Set only on the final fragment of a packet; %NULL for all other
- *	fragments.  When this fragment completes, then we can free this
- *	skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *	buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *	freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *	freed when descriptor completes.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
-	const struct sk_buff *skb;
-	struct efx_tso_header *tsoh;
+	union {
+		const struct sk_buff *skb;
+		void *heap_buf;
+	};
 	dma_addr_t dma_addr;
+	unsigned short flags;
 	unsigned short len;
-	bool continuation;
-	bool unmap_single;
 	unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
+#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
 
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@
  *	variable indicates that the queue is full.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *	that are not in use, and so available for new TSO sends. The list
- *	is protected by the TX queue lock.
  * @tso_bursts: Number of times TSO xmit invoked by kernel
  * @tso_long_headers: Number of packets with headers too long for standard
  *	blocks
@@ -175,6 +175,7 @@
 	struct efx_channel *channel;
 	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
+	struct efx_buffer *tsoh_page;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
 	bool initialised;
@@ -187,7 +188,6 @@
 	unsigned int insert_count ____cacheline_aligned_in_smp;
 	unsigned int write_count;
 	unsigned int old_read_count;
-	struct efx_tso_header *tso_headers_free;
 	unsigned int tso_bursts;
 	unsigned int tso_long_headers;
 	unsigned int tso_packets;
@@ -430,11 +430,9 @@
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-	STATE_INIT = 0,
-	STATE_RUNNING = 1,
-	STATE_FINI = 2,
-	STATE_DISABLED = 3,
-	STATE_MAX,
+	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
+	STATE_READY = 1,	/* hardware ready and netdev registered */
+	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
 };
 
 /*
@@ -654,7 +652,7 @@
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
  * @reset_pending: Bitmask for pending resets
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@
  *	should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
  * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
  * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
  * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@
 
 	unsigned rxq_entries;
 	unsigned txq_entries;
+	unsigned int txq_stop_thresh;
+	unsigned int txq_wake_thresh;
+
 	unsigned tx_dc_base;
 	unsigned rx_dc_base;
 	unsigned sram_lim_qw;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799..cdff40b 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -298,7 +298,7 @@
 /**************************************************************************
  *
  * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
  *
  **************************************************************************/
 
@@ -401,8 +401,10 @@
 		++tx_queue->write_count;
 
 		/* Create TX descriptor ring entry */
+		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
 		EFX_POPULATE_QWORD_4(*txd,
-				     FSF_AZ_TX_KER_CONT, buffer->continuation,
+				     FSF_AZ_TX_KER_CONT,
+				     buffer->flags & EFX_TX_BUF_CONT,
 				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
 				     FSF_AZ_TX_KER_BUF_REGION, 0,
 				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7..ebca75e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
@@ -39,67 +31,32 @@
 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
 					 buffer->unmap_len);
-		if (buffer->unmap_single)
+		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
 					 DMA_TO_DEVICE);
 		else
 			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
 				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
-		buffer->unmap_single = false;
 	}
 
-	if (buffer->skb) {
+	if (buffer->flags & EFX_TX_BUF_SKB) {
 		(*pkts_compl)++;
 		(*bytes_compl) += buffer->skb->len;
 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-		buffer->skb = NULL;
 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
 			   "TX queue %d transmission id %x complete\n",
 			   tx_queue->queue, tx_queue->read_count);
+	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
+		kfree(buffer->heap_buf);
 	}
-}
 
-/**
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- *	The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header.  Use TSOH_DATA()
- * to find the packet header data.  Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length.  TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
-	union {
-		struct efx_tso_header *next;
-		size_t unmap_len;
-	};
-	dma_addr_t dma_addr;
-};
+	buffer->len = 0;
+	buffer->flags = 0;
+}
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
-			       struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-			  struct efx_tx_buffer *buffer)
-{
-	if (buffer->tsoh) {
-		if (likely(!buffer->tsoh->unmap_len)) {
-			buffer->tsoh->next = tx_queue->tso_headers_free;
-			tx_queue->tso_headers_free = buffer->tsoh;
-		} else {
-			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
-		}
-		buffer->tsoh = NULL;
-	}
-}
-
 
 static inline unsigned
 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -119,6 +76,75 @@
 	return len;
 }
 
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+	/* Header and payload descriptor for each output segment, plus
+	 * one for every input fragment boundary within a segment
+	 */
+	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+	/* Possibly one more per segment for the alignment workaround */
+	if (EFX_WORKAROUND_5391(efx))
+		max_descs += EFX_TSO_MAX_SEGS;
+
+	/* Possibly more for PCIe page boundaries within input fragments */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	return max_descs;
+}
+
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+	else
+		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+	/* We need to consider both queues that the net core sees as one */
+	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+	struct efx_nic *efx = txq1->efx;
+	unsigned int fill_level;
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	if (likely(fill_level < efx->txq_stop_thresh))
+		return;
+
+	/* We used the stale old_read_count above, which gives us a
+	 * pessimistic estimate of the fill level (which may even
+	 * validly be >= efx->txq_entries).  Now try again using
+	 * read_count (more likely to be a cache miss).
+	 *
+	 * If we read read_count and then conditionally stop the
+	 * queue, it is possible for the completion path to race with
+	 * us and complete all outstanding descriptors in the middle,
+	 * after which there will be no more completions to wake it.
+	 * Therefore we stop the queue first, then read read_count
+	 * (with a memory barrier to ensure the ordering), then
+	 * restart the queue if the fill level turns out to be low
+	 * enough.
+	 */
+	netif_tx_stop_queue(txq1->core_txq);
+	smp_mb();
+	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+	if (likely(fill_level < efx->txq_stop_thresh)) {
+		smp_mb();
+		if (likely(!efx->loopback_selftest))
+			netif_tx_start_queue(txq1->core_txq);
+	}
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
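
The efx_tx_maybe_stop_queue() helper added in the hunk above moves the
stop/restart logic out of the descriptor loop: it stops the queue first,
issues smp_mb(), re-reads read_count and then restarts the queue itself if
completions raced in and the fresh fill level is low enough, so a wakeup
cannot be missed.  A toy single-threaded userspace model of that ordering
(plain variables stand in for the driver state and netif helpers; names are
invented for the example):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint read_count;		/* advanced by the completion path */
static unsigned int insert_count;	/* advanced by the xmit path */
static atomic_bool queue_stopped;

static void model_maybe_stop(unsigned int stop_thresh)
{
	unsigned int old_read = atomic_load_explicit(&read_count,
						     memory_order_relaxed);
	if (insert_count - old_read < stop_thresh)
		return;

	/* Stop first, then re-read the consumer index with a full barrier
	 * in between; if completions raced with us the fresh fill level
	 * is low and we restart the queue ourselves. */
	atomic_store(&queue_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (insert_count - atomic_load(&read_count) < stop_thresh)
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	insert_count = 500;
	atomic_store(&read_count, 490);		/* nearly caught up */
	model_maybe_stop(480);			/* fill level 10: stays running */
	printf("after light load: stopped=%d\n", (int)atomic_load(&queue_stopped));

	atomic_store(&read_count, 10);		/* consumer far behind */
	model_maybe_stop(480);			/* fill level 490: queue stops */
	printf("after heavy load: stopped=%d\n", (int)atomic_load(&queue_stopped));
	return 0;
}
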
@@ -132,7 +158,7 @@
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -141,12 +167,11 @@
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+	unsigned int len, unmap_len = 0, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
-	bool unmap_single;
-	int q_space, i = 0;
-	netdev_tx_t rc = NETDEV_TX_OK;
+	unsigned short dma_flags;
+	int i = 0;
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -164,14 +189,11 @@
 			return NETDEV_TX_OK;
 	}
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
-	unmap_single = true;
+	dma_flags = EFX_TX_BUF_MAP_SINGLE;
 	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
 	/* Process all fragments */
@@ -186,39 +208,10 @@
 
 		/* Add to TX queue, splitting across DMA boundaries */
 		do {
-			if (unlikely(q_space-- <= 0)) {
-				/* It might be that completions have
-				 * happened since the xmit path last
-				 * checked.  Update the xmit path's
-				 * copy of read_count.
-				 */
-				netif_tx_stop_queue(tx_queue->core_txq);
-				/* This memory barrier protects the
-				 * change of queue state from the access
-				 * of read_count. */
-				smp_mb();
-				tx_queue->old_read_count =
-					ACCESS_ONCE(tx_queue->read_count);
-				fill_level = (tx_queue->insert_count
-					      - tx_queue->old_read_count);
-				q_space = efx->txq_entries - 1 - fill_level;
-				if (unlikely(q_space-- <= 0)) {
-					rc = NETDEV_TX_BUSY;
-					goto unwind;
-				}
-				smp_mb();
-				if (likely(!efx->loopback_selftest))
-					netif_tx_start_queue(
-						tx_queue->core_txq);
-			}
-
 			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 			buffer = &tx_queue->buffer[insert_ptr];
-			efx_tsoh_free(tx_queue, buffer);
-			EFX_BUG_ON_PARANOID(buffer->tsoh);
-			EFX_BUG_ON_PARANOID(buffer->skb);
+			EFX_BUG_ON_PARANOID(buffer->flags);
 			EFX_BUG_ON_PARANOID(buffer->len);
-			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
 			dma_len = efx_max_tx_len(efx, dma_addr);
@@ -228,13 +221,14 @@
 			/* Fill out per descriptor fields */
 			buffer->len = dma_len;
 			buffer->dma_addr = dma_addr;
+			buffer->flags = EFX_TX_BUF_CONT;
 			len -= dma_len;
 			dma_addr += dma_len;
 			++tx_queue->insert_count;
 		} while (len);
 
 		/* Transfer ownership of the unmapping to the final buffer */
-		buffer->unmap_single = unmap_single;
+		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
 		buffer->unmap_len = unmap_len;
 		unmap_len = 0;
 
@@ -245,20 +239,22 @@
 		len = skb_frag_size(fragment);
 		i++;
 		/* Map for DMA */
-		unmap_single = false;
+		dma_flags = 0;
 		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
 	buffer->skb = skb;
-	buffer->continuation = false;
+	buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	return NETDEV_TX_OK;
 
  dma_err:
@@ -270,7 +266,6 @@
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
 
- unwind:
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -278,12 +273,11 @@
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-		buffer->len = 0;
 	}
 
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
-		if (unmap_single)
+		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
 					 DMA_TO_DEVICE);
 		else
@@ -291,7 +285,7 @@
 				       DMA_TO_DEVICE);
 	}
 
-	return rc;
+	return NETDEV_TX_OK;
 }
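
The hunk above folds the old per-buffer booleans (continuation, unmap_single and the tsoh pointer) into a single buffer->flags word, so the unwind path only has to inspect the flags to know how to release each descriptor. A standalone sketch of that dispatch, with hypothetical flag values and printf() standing in for the real unmap/free calls:

/* Illustrative only: the EFX_TX_BUF_* values and the stubs below are
 * made-up stand-ins, not the driver's definitions. */
#include <stdio.h>

#define EFX_TX_BUF_CONT        0x1	/* not the last descriptor of a packet */
#define EFX_TX_BUF_SKB         0x2	/* buffer owns an skb to free */
#define EFX_TX_BUF_MAP_SINGLE  0x4	/* mapped with dma_map_single() */
#define EFX_TX_BUF_HEAP        0x8	/* TSO header allocated from the heap */

struct fake_tx_buffer {
	unsigned short flags;
	unsigned int unmap_len;
};

static void fake_dequeue(const struct fake_tx_buffer *b)
{
	if (b->unmap_len) {
		if (b->flags & EFX_TX_BUF_MAP_SINGLE)
			printf("dma_unmap_single(%u bytes)\n", b->unmap_len);
		else
			printf("dma_unmap_page(%u bytes)\n", b->unmap_len);
	}
	if (b->flags & EFX_TX_BUF_SKB)
		printf("dev_kfree_skb_any()\n");
	if (b->flags & EFX_TX_BUF_HEAP)
		printf("kfree(heap TSO header)\n");
}

int main(void)
{
	struct fake_tx_buffer head = { EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE, 64 };
	struct fake_tx_buffer last = { EFX_TX_BUF_SKB, 1448 };

	fake_dequeue(&head);	/* unmaps with dma_unmap_single */
	fake_dequeue(&last);	/* unmaps with dma_unmap_page, frees the skb */
	return 0;
}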
 
 /* Remove packets from the TX queue
@@ -321,8 +315,6 @@
 		}
 
 		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-		buffer->continuation = true;
-		buffer->len = 0;
 
 		++tx_queue->read_count;
 		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -431,6 +423,7 @@
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	struct efx_tx_queue *txq2;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -438,15 +431,18 @@
 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
 	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of the
-	 * queue state. */
+	/* See if we need to restart the netif queue.  This memory
+	 * barrier ensures that we write read_count (inside
+	 * efx_dequeue_buffers()) before reading the queue status.
+	 */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
 	    likely(efx->port_enabled) &&
 	    likely(netif_device_present(efx->net_dev))) {
-		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD(efx))
+		txq2 = efx_tx_queue_partner(tx_queue);
+		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+				 txq2->insert_count - txq2->read_count);
+		if (fill_level <= efx->txq_wake_thresh)
 			netif_tx_wake_queue(tx_queue->core_txq);
 	}
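
With the per-descriptor q_space bookkeeping gone, the completion path above decides whether to wake the core queue by taking the larger fill level of the two partner TX queues and comparing it against efx->txq_wake_thresh. A minimal userspace sketch of that check; the ring size, wake threshold and counter values are made up, and it also shows why the unsigned subtraction keeps working once the counters wrap:

#include <stdio.h>

/* Hypothetical numbers: a 1024-entry ring woken at half occupancy. */
#define TXQ_ENTRIES     1024u
#define TXQ_WAKE_THRESH (TXQ_ENTRIES / 2u)

struct fake_txq {
	unsigned int insert_count;
	unsigned int read_count;
};

static unsigned int fill_level(const struct fake_txq *q)
{
	/* Unsigned subtraction stays correct even after the counters wrap. */
	return q->insert_count - q->read_count;
}

int main(void)
{
	struct fake_txq txq  = { .insert_count = 0xfffffff0u, .read_count = 0xffffff00u };
	struct fake_txq txq2 = { .insert_count = 900u, .read_count = 100u };
	unsigned int level = fill_level(&txq) > fill_level(&txq2) ?
			     fill_level(&txq) : fill_level(&txq2);

	printf("fill levels: %u and %u, busiest %u\n",
	       fill_level(&txq), fill_level(&txq2), level);
	if (level <= TXQ_WAKE_THRESH)
		printf("wake the core TX queue\n");
	else
		printf("leave the queue stopped\n");
	return 0;
}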
 
@@ -461,11 +457,26 @@
 	}
 }
 
+/* Size of page-based TSO header buffers.  Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE	128
+#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
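
A quick walkthrough of the arithmetic in efx_tsoh_page_count(), assuming 4 KiB pages and a 1024-entry ring (the page and ring sizes here are illustrative values, not taken from the driver):

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u
#define TSOH_STD_SIZE    128u
#define TSOH_PER_PAGE    (PAGE_SIZE_SKETCH / TSOH_STD_SIZE)

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int ring_entries = 1024;	/* ptr_mask + 1 */

	/* Only every other descriptor can be a TSO header, so halve the ring
	 * before dividing by the number of header slots per page. */
	printf("%u-entry ring -> %u header pages\n",
	       ring_entries, div_round_up(ring_entries, 2 * TSOH_PER_PAGE));
	return 0;
}

With 32 header slots per page and only every second descriptor eligible, the 1024-entry ring in this example needs 16 pages.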
+
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int entries;
-	int i, rc;
+	int rc;
 
 	/* Create the smallest power-of-two aligned ring */
 	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -481,17 +492,28 @@
 				   GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= tx_queue->ptr_mask; ++i)
-		tx_queue->buffer[i].continuation = true;
+
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+		tx_queue->tsoh_page =
+			kcalloc(efx_tsoh_page_count(tx_queue),
+				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+		if (!tx_queue->tsoh_page) {
+			rc = -ENOMEM;
+			goto fail1;
+		}
+	}
 
 	/* Allocate hardware ring */
 	rc = efx_nic_probe_tx(tx_queue);
 	if (rc)
-		goto fail;
+		goto fail2;
 
 	return 0;
 
- fail:
+fail2:
+	kfree(tx_queue->tsoh_page);
+	tx_queue->tsoh_page = NULL;
+fail1:
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
 	return rc;
@@ -527,8 +549,6 @@
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-		buffer->continuation = true;
-		buffer->len = 0;
 
 		++tx_queue->read_count;
 	}
@@ -549,13 +569,12 @@
 	efx_nic_fini_tx(tx_queue);
 
 	efx_release_tx_buffers(tx_queue);
-
-	/* Free up TSO header cache */
-	efx_fini_tso(tx_queue);
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	int i;
+
 	if (!tx_queue->buffer)
 		return;
 
@@ -563,6 +582,14 @@
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
 
+	if (tx_queue->tsoh_page) {
+		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+			efx_nic_free_buffer(tx_queue->efx,
+					    &tx_queue->tsoh_page[i]);
+		kfree(tx_queue->tsoh_page);
+		tx_queue->tsoh_page = NULL;
+	}
+
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
 }
@@ -585,22 +612,7 @@
 #define TSOH_OFFSET	NET_IP_ALIGN
 #endif
 
-#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len)					\
-	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list.  Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE		128
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -612,10 +624,12 @@
  * @in_len: Remaining length in current SKB fragment
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
  * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -632,11 +646,13 @@
 	unsigned in_len;
 	unsigned unmap_len;
 	dma_addr_t unmap_addr;
-	bool unmap_single;
+	unsigned short dma_flags;
 
 	__be16 protocol;
+	unsigned int ip_off;
+	unsigned int tcp_off;
 	unsigned header_len;
-	int full_packet_size;
+	unsigned int ip_base_len;
 };
 
 
@@ -668,91 +684,43 @@
 	return protocol;
 }
 
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+			       struct efx_tx_buffer *buffer, unsigned int len)
 {
-	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-	struct efx_tso_header *tsoh;
-	dma_addr_t dma_addr;
-	u8 *base_kva, *kva;
+	u8 *result;
 
-	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
-	if (base_kva == NULL) {
-		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
-			  "Unable to allocate page for TSO headers\n");
-		return -ENOMEM;
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->flags);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+		unsigned index =
+			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+		struct efx_buffer *page_buf =
+			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+		unsigned offset =
+			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+		if (unlikely(!page_buf->addr) &&
+		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+			return NULL;
+
+		result = (u8 *)page_buf->addr + offset;
+		buffer->dma_addr = page_buf->dma_addr + offset;
+		buffer->flags = EFX_TX_BUF_CONT;
+	} else {
+		tx_queue->tso_long_headers++;
+
+		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+		if (unlikely(!buffer->heap_buf))
+			return NULL;
+		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
 	}
 
-	/* dma_alloc_coherent() allocates pages. */
-	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
+	buffer->len = len;
 
-	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
-		tsoh = (struct efx_tso_header *)kva;
-		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
-		tsoh->next = tx_queue->tso_headers_free;
-		tx_queue->tso_headers_free = tsoh;
-	}
-
-	return 0;
-}
-
-
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
-				struct efx_tso_header *tsoh,
-				struct device *dma_dev)
-{
-	struct efx_tso_header **p;
-	unsigned long base_kva;
-	dma_addr_t base_dma;
-
-	base_kva = (unsigned long)tsoh & PAGE_MASK;
-	base_dma = tsoh->dma_addr & PAGE_MASK;
-
-	p = &tx_queue->tso_headers_free;
-	while (*p != NULL) {
-		if (((unsigned long)*p & PAGE_MASK) == base_kva)
-			*p = (*p)->next;
-		else
-			p = &(*p)->next;
-	}
-
-	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
-
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
-	struct efx_tso_header *tsoh;
-
-	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
-	if (unlikely(!tsoh))
-		return NULL;
-
-	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
-					TSOH_BUFFER(tsoh), header_len,
-					DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
-				       tsoh->dma_addr))) {
-		kfree(tsoh);
-		return NULL;
-	}
-
-	tsoh->unmap_len = header_len;
-	return tsoh;
-}
-
-static void
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
-	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-			 tsoh->dma_addr, tsoh->unmap_len,
-			 DMA_TO_DEVICE);
-	kfree(tsoh);
+	return result;
 }
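
The small-header path above no longer pulls headers off a free list; it derives the slot directly from the insert pointer. A standalone sketch of that index-to-(page, offset) mapping, assuming a 1024-entry ring and a NET_IP_ALIGN of 2 (both are assumptions for the example):

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u
#define TSOH_STD_SIZE    128u
#define TSOH_PER_PAGE    (PAGE_SIZE_SKETCH / TSOH_STD_SIZE)
#define TSOH_OFFSET_SK   2u	/* stand-in for NET_IP_ALIGN */
#define PTR_MASK         1023u	/* 1024-entry ring */

int main(void)
{
	unsigned int insert_count;

	/* Header descriptors land on even insert counts, hence the /2. */
	for (insert_count = 0; insert_count < 8; insert_count += 2) {
		unsigned int index  = (insert_count & PTR_MASK) / 2;
		unsigned int page   = index / TSOH_PER_PAGE;
		unsigned int offset = TSOH_STD_SIZE * (index % TSOH_PER_PAGE)
				      + TSOH_OFFSET_SK;

		printf("insert_count %u -> page %u, offset %u\n",
		       insert_count, page, offset);
	}
	return 0;
}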
 
 /**
@@ -762,47 +730,19 @@
  * @len:		Length of fragment
  * @final_buffer:	The final buffer inserted into the queue
  *
- * Push descriptors onto the TX queue.  Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-			       dma_addr_t dma_addr, unsigned len,
-			       struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+				dma_addr_t dma_addr, unsigned len,
+				struct efx_tx_buffer **final_buffer)
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr;
-	int q_space;
+	unsigned dma_len, insert_ptr;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	while (1) {
-		if (unlikely(q_space-- <= 0)) {
-			/* It might be that completions have happened
-			 * since the xmit path last checked.  Update
-			 * the xmit path's copy of read_count.
-			 */
-			netif_tx_stop_queue(tx_queue->core_txq);
-			/* This memory barrier protects the change of
-			 * queue state from the access of read_count. */
-			smp_mb();
-			tx_queue->old_read_count =
-				ACCESS_ONCE(tx_queue->read_count);
-			fill_level = (tx_queue->insert_count
-				      - tx_queue->old_read_count);
-			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0)) {
-				*final_buffer = NULL;
-				return 1;
-			}
-			smp_mb();
-			netif_tx_start_queue(tx_queue->core_txq);
-		}
-
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
@@ -811,12 +751,9 @@
 				    tx_queue->read_count >=
 				    efx->txq_entries);
 
-		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
 		EFX_BUG_ON_PARANOID(buffer->unmap_len);
-		EFX_BUG_ON_PARANOID(buffer->skb);
-		EFX_BUG_ON_PARANOID(!buffer->continuation);
-		EFX_BUG_ON_PARANOID(buffer->tsoh);
+		EFX_BUG_ON_PARANOID(buffer->flags);
 
 		buffer->dma_addr = dma_addr;
 
@@ -826,7 +763,8 @@
 		if (dma_len >= len)
 			break;
 
-		buffer->len = dma_len; /* Don't set the other members */
+		buffer->len = dma_len;
+		buffer->flags = EFX_TX_BUF_CONT;
 		dma_addr += dma_len;
 		len -= dma_len;
 	}
@@ -834,7 +772,6 @@
 	EFX_BUG_ON_PARANOID(!len);
 	buffer->len = len;
 	*final_buffer = buffer;
-	return 0;
 }
 
 
@@ -845,54 +782,42 @@
  * a single fragment, and we know it doesn't cross a page boundary.  It
  * also allows us to not worry about end-of-packet etc.
  */
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-			       struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+			      struct efx_tx_buffer *buffer, u8 *header)
 {
-	struct efx_tx_buffer *buffer;
-
-	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-	efx_tsoh_free(tx_queue, buffer);
-	EFX_BUG_ON_PARANOID(buffer->len);
-	EFX_BUG_ON_PARANOID(buffer->unmap_len);
-	EFX_BUG_ON_PARANOID(buffer->skb);
-	EFX_BUG_ON_PARANOID(!buffer->continuation);
-	EFX_BUG_ON_PARANOID(buffer->tsoh);
-	buffer->len = len;
-	buffer->dma_addr = tsoh->dma_addr;
-	buffer->tsoh = tsoh;
+	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+						  header, buffer->len,
+						  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+					       buffer->dma_addr))) {
+			kfree(buffer->heap_buf);
+			buffer->len = 0;
+			buffer->flags = 0;
+			return -ENOMEM;
+		}
+		buffer->unmap_len = buffer->len;
+		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+	}
 
 	++tx_queue->insert_count;
+	return 0;
 }
 
 
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  The buffers must not have an
+ * skb attached.
+/* Remove buffers put into a tx_queue.  The buffers must not have an
+ * skb attached.
+ */
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
 	struct efx_tx_buffer *buffer;
-	dma_addr_t unmap_addr;
 
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
 					   tx_queue->ptr_mask];
-		efx_tsoh_free(tx_queue, buffer);
-		EFX_BUG_ON_PARANOID(buffer->skb);
-		if (buffer->unmap_len) {
-			unmap_addr = (buffer->dma_addr + buffer->len -
-				      buffer->unmap_len);
-			if (buffer->unmap_single)
-				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-						 unmap_addr, buffer->unmap_len,
-						 DMA_TO_DEVICE);
-			else
-				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
-					       unmap_addr, buffer->unmap_len,
-					       DMA_TO_DEVICE);
-			buffer->unmap_len = 0;
-		}
-		buffer->len = 0;
-		buffer->continuation = true;
+		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
 	}
 }
 
@@ -900,17 +825,16 @@
 /* Parse the SKB header and initialise state. */
 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
-	/* All ethernet/IP/TCP headers combined size is TCP header size
-	 * plus offset of TCP header relative to start of packet.
-	 */
-	st->header_len = ((tcp_hdr(skb)->doff << 2u)
-			  + PTR_DIFF(tcp_hdr(skb), skb->data));
-	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
-	if (st->protocol == htons(ETH_P_IP))
+	st->ip_off = skb_network_header(skb) - skb->data;
+	st->tcp_off = skb_transport_header(skb) - skb->data;
+	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+	if (st->protocol == htons(ETH_P_IP)) {
+		st->ip_base_len = st->header_len - st->ip_off;
 		st->ipv4_id = ntohs(ip_hdr(skb)->id);
-	else
+	} else {
+		st->ip_base_len = st->header_len - st->tcp_off;
 		st->ipv4_id = 0;
+	}
 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -919,7 +843,7 @@
 
 	st->out_len = skb->len - st->header_len;
 	st->unmap_len = 0;
-	st->unmap_single = false;
+	st->dma_flags = 0;
 }
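
tso_start() now records the IP and TCP header offsets and an ip_base_len instead of a full_packet_size. The sketch below works those values out for a hypothetical untagged Ethernet + IPv4 + option-less TCP frame; the header sizes are assumptions for the example, not values read from the driver:

#include <stdio.h>

int main(void)
{
	/* Hypothetical frame: 14-byte Ethernet header, 20-byte IPv4 header,
	 * TCP doff of 5 (20 bytes, no options). */
	unsigned int eth_hlen = 14, ip_hlen = 20, tcp_doff = 5;

	unsigned int ip_off     = eth_hlen;
	unsigned int tcp_off    = eth_hlen + ip_hlen;
	unsigned int header_len = tcp_off + (tcp_doff << 2);
	unsigned int ip_base_v4 = header_len - ip_off;	/* IP hdr + TCP hdr */
	unsigned int ip_base_v6 = header_len - tcp_off;	/* TCP hdr only */

	printf("ip_off=%u tcp_off=%u header_len=%u\n", ip_off, tcp_off, header_len);
	printf("ip_base_len: IPv4=%u IPv6=%u\n", ip_base_v4, ip_base_v6);
	return 0;
}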
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -928,7 +852,7 @@
 	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
 					  skb_frag_size(frag), DMA_TO_DEVICE);
 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-		st->unmap_single = false;
+		st->dma_flags = 0;
 		st->unmap_len = skb_frag_size(frag);
 		st->in_len = skb_frag_size(frag);
 		st->dma_addr = st->unmap_addr;
@@ -946,7 +870,7 @@
 	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
 					len, DMA_TO_DEVICE);
 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-		st->unmap_single = true;
+		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
 		st->unmap_len = len;
 		st->in_len = len;
 		st->dma_addr = st->unmap_addr;
@@ -963,20 +887,19 @@
  * @st:			TSO state
  *
  * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
  */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-					 const struct sk_buff *skb,
-					 struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+					  const struct sk_buff *skb,
+					  struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer;
-	int n, end_of_packet, rc;
+	int n;
 
 	if (st->in_len == 0)
-		return 0;
+		return;
 	if (st->packet_space == 0)
-		return 0;
+		return;
 
 	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -987,25 +910,24 @@
 	st->out_len -= n;
 	st->in_len -= n;
 
-	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-	if (likely(rc == 0)) {
-		if (st->out_len == 0)
-			/* Transfer ownership of the skb */
-			buffer->skb = skb;
+	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 
-		end_of_packet = st->out_len == 0 || st->packet_space == 0;
-		buffer->continuation = !end_of_packet;
+	if (st->out_len == 0) {
+		/* Transfer ownership of the skb */
+		buffer->skb = skb;
+		buffer->flags = EFX_TX_BUF_SKB;
+	} else if (st->packet_space != 0) {
+		buffer->flags = EFX_TX_BUF_CONT;
+	}
 
-		if (st->in_len == 0) {
-			/* Transfer ownership of the DMA mapping */
-			buffer->unmap_len = st->unmap_len;
-			buffer->unmap_single = st->unmap_single;
-			st->unmap_len = 0;
-		}
+	if (st->in_len == 0) {
+		/* Transfer ownership of the DMA mapping */
+		buffer->unmap_len = st->unmap_len;
+		buffer->flags |= st->dma_flags;
+		st->unmap_len = 0;
 	}
 
 	st->dma_addr += n;
-	return rc;
 }
 
 
@@ -1016,36 +938,25 @@
  * @st:			TSO state
  *
  * Generate a new header and prepare for the new packet.  Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if the header cannot be allocated.
  */
 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 				const struct sk_buff *skb,
 				struct tso_state *st)
 {
-	struct efx_tso_header *tsoh;
+	struct efx_tx_buffer *buffer =
+		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
 	struct tcphdr *tsoh_th;
 	unsigned ip_length;
 	u8 *header;
+	int rc;
 
-	/* Allocate a DMA-mapped header buffer. */
-	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
-		if (tx_queue->tso_headers_free == NULL) {
-			if (efx_tsoh_block_alloc(tx_queue))
-				return -1;
-		}
-		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
-		tsoh = tx_queue->tso_headers_free;
-		tx_queue->tso_headers_free = tsoh->next;
-		tsoh->unmap_len = 0;
-	} else {
-		tx_queue->tso_long_headers++;
-		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
-		if (unlikely(!tsoh))
-			return -1;
-	}
+	/* Allocate and insert a DMA-mapped header buffer. */
+	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+	if (!header)
+		return -ENOMEM;
 
-	header = TSOH_BUFFER(tsoh);
-	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+	tsoh_th = (struct tcphdr *)(header + st->tcp_off);
 
 	/* Copy and update the headers. */
 	memcpy(header, skb->data, st->header_len);
@@ -1054,19 +965,19 @@
 	st->seqnum += skb_shinfo(skb)->gso_size;
 	if (st->out_len > skb_shinfo(skb)->gso_size) {
 		/* This packet will not finish the TSO burst. */
-		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+		st->packet_space = skb_shinfo(skb)->gso_size;
 		tsoh_th->fin = 0;
 		tsoh_th->psh = 0;
 	} else {
 		/* This packet will be the last in the TSO burst. */
-		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+		st->packet_space = st->out_len;
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 	}
+	ip_length = st->ip_base_len + st->packet_space;
 
 	if (st->protocol == htons(ETH_P_IP)) {
-		struct iphdr *tsoh_iph =
-			(struct iphdr *)(header + SKB_IPV4_OFF(skb));
+		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
 
 		tsoh_iph->tot_len = htons(ip_length);
 
@@ -1075,16 +986,16 @@
 		st->ipv4_id++;
 	} else {
 		struct ipv6hdr *tsoh_iph =
-			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+			(struct ipv6hdr *)(header + st->ip_off);
 
-		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+		tsoh_iph->payload_len = htons(ip_length);
 	}
 
-	st->packet_space = skb_shinfo(skb)->gso_size;
-	++tx_queue->tso_packets;
+	rc = efx_tso_put_header(tx_queue, buffer, header);
+	if (unlikely(rc))
+		return rc;
 
-	/* Form a descriptor for this header. */
-	efx_tso_put_header(tx_queue, tsoh, st->header_len);
+	++tx_queue->tso_packets;
 
 	return 0;
 }
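
The per-segment header rewrite above now derives one ip_length from ip_base_len plus the segment's payload; that value becomes tot_len for IPv4 and payload_len for IPv6. A hypothetical walkthrough for an IPv4 flow with a 40-byte ip_base_len and a 1448-byte MSS (both assumed values):

#include <stdio.h>

int main(void)
{
	unsigned int ip_base_len = 40, gso_size = 1448;
	unsigned int out_len     = 2000;	/* payload still to send */

	unsigned int packet_space = out_len > gso_size ? gso_size : out_len;
	unsigned int ip_length    = ip_base_len + packet_space;

	printf("mid-burst segment: ip_length = %u\n", ip_length);

	out_len -= packet_space;		/* 552 bytes left */
	packet_space = out_len > gso_size ? gso_size : out_len;
	printf("final segment:     ip_length = %u\n", ip_base_len + packet_space);
	return 0;
}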
@@ -1099,13 +1010,13 @@
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	int frag_i, rc, rc2 = NETDEV_TX_OK;
+	int frag_i, rc;
 	struct tso_state state;
 
 	/* Find the packet protocol and sanity-check it */
@@ -1137,11 +1048,7 @@
 		goto mem_err;
 
 	while (1) {
-		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc)) {
-			rc2 = NETDEV_TX_BUSY;
-			goto unwind;
-		}
+		tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1165,6 +1072,8 @@
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 
@@ -1173,10 +1082,9 @@
 		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);
 
- unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
-		if (state.unmap_single)
+		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
 					 state.unmap_len, DMA_TO_DEVICE);
 		else
@@ -1185,25 +1093,5 @@
 	}
 
 	efx_enqueue_unwind(tx_queue);
-	return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
-	unsigned i;
-
-	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->ptr_mask; ++i)
-			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
-	}
-
-	while (tx_queue->tso_headers_free != NULL)
-		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    &tx_queue->efx->pci_dev->dev);
+	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd8882f..c136162 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2077,7 +2077,7 @@
 		goto error_netdev_register;
 	}
 
-	priv->stmmac_clk = clk_get(priv->device, NULL);
+	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
 	if (IS_ERR(priv->stmmac_clk)) {
 		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
 		goto error_clk_get;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cd01ee7..b93245c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -74,7 +74,7 @@
  * the necessary resources and invokes the main to init
  * the net device, register the mdio bus etc.
  */
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct resource *res;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1b173a6..b26cbda 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -32,7 +32,7 @@
 
 config TI_DAVINCI_MDIO
 	tristate "TI DaVinci MDIO Support"
-	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
 	select PHYLIB
 	---help---
 	  This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@
 
 config TI_DAVINCI_CPDMA
 	tristate "TI DaVinci CPDMA Support"
-	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
 	---help---
 	  This driver supports TI's DaVinci CPDMA dma engine.
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1e5d85b..0cbc0e5 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -28,6 +28,9 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -709,6 +712,158 @@
 	slave->sliver	= regs + data->sliver_reg_ofs;
 }
 
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+			 struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *slave_node;
+	int i = 0, ret;
+	u32 prop;
+
+	if (!node)
+		return -EINVAL;
+
+	if (of_property_read_u32(node, "slaves", &prop)) {
+		pr_err("Missing slaves property in the DT.\n");
+		return -EINVAL;
+	}
+	data->slaves = prop;
+
+	data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+				   data->slaves, GFP_KERNEL);
+	if (!data->slave_data) {
+		pr_err("Could not allocate slave memory.\n");
+		return -EINVAL;
+	}
+
+	data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
+
+	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+		pr_err("Missing cpdma_channels property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->channels = prop;
+
+	if (of_property_read_u32(node, "host_port_no", &prop)) {
+		pr_err("Missing host_port_no property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->host_port_num = prop;
+
+	if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
+		pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->cpdma_reg_ofs = prop;
+
+	if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
+		pr_err("Missing cpdma_sram_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->cpdma_sram_ofs = prop;
+
+	if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
+		pr_err("Missing ale_reg_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->ale_reg_ofs = prop;
+
+	if (of_property_read_u32(node, "ale_entries", &prop)) {
+		pr_err("Missing ale_entries property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->ale_entries = prop;
+
+	if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
+		pr_err("Missing host_port_reg_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->host_port_reg_ofs = prop;
+
+	if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
+		pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->hw_stats_reg_ofs = prop;
+
+	if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
+		pr_err("Missing bd_ram_ofs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->bd_ram_ofs = prop;
+
+	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+		pr_err("Missing bd_ram_size property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->bd_ram_size = prop;
+
+	if (of_property_read_u32(node, "rx_descs", &prop)) {
+		pr_err("Missing rx_descs property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->rx_descs = prop;
+
+	if (of_property_read_u32(node, "mac_control", &prop)) {
+		pr_err("Missing mac_control property in the DT.\n");
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	data->mac_control = prop;
+
+	for_each_child_of_node(node, slave_node) {
+		struct cpsw_slave_data *slave_data = data->slave_data + i;
+		const char *phy_id = NULL;
+		const void *mac_addr = NULL;
+
+		if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+			pr_err("Missing slave[%d] phy_id property\n", i);
+			ret = -EINVAL;
+			goto error_ret;
+		}
+		slave_data->phy_id = phy_id;
+
+		if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
+			pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
+			ret = -EINVAL;
+			goto error_ret;
+		}
+		slave_data->slave_reg_ofs = prop;
+
+		if (of_property_read_u32(slave_node, "sliver_reg_ofs",
+					 &prop)) {
+			pr_err("Missing slave[%d] sliver_reg_ofs property\n",
+				i);
+			ret = -EINVAL;
+			goto error_ret;
+		}
+		slave_data->sliver_reg_ofs = prop;
+
+		mac_addr = of_get_mac_address(slave_node);
+		if (mac_addr)
+			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
+		i++;
+	}
+
+	return 0;
+
+error_ret:
+	kfree(data->slave_data);
+	return ret;
+}
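
cpsw_probe_dt() reads each u32 property with the same read-or-fail pattern. Purely as an illustration of one way such boilerplate could be condensed, here is a userspace, table-driven sketch; fake_read_u32() stands in for of_property_read_u32(), the property list is abridged, and none of this is how the driver actually does it:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct fake_pdata {
	unsigned int slaves;
	unsigned int channels;
	unsigned int ale_entries;
};

struct u32_prop {
	const char *name;
	size_t offset;
};

static const struct u32_prop props[] = {
	{ "slaves",         offsetof(struct fake_pdata, slaves) },
	{ "cpdma_channels", offsetof(struct fake_pdata, channels) },
	{ "ale_entries",    offsetof(struct fake_pdata, ale_entries) },
};

static int fake_read_u32(const char *name, unsigned int *val)
{
	(void)name;	/* pretend every property exists with value 2 */
	*val = 2;
	return 0;
}

int main(void)
{
	struct fake_pdata data;
	size_t i;

	for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
		unsigned int val;

		if (fake_read_u32(props[i].name, &val)) {
			fprintf(stderr, "Missing %s property in the DT.\n",
				props[i].name);
			return -1;
		}
		memcpy((char *)&data + props[i].offset, &val, sizeof(val));
	}
	printf("slaves=%u channels=%u ale_entries=%u\n",
	       data.slaves, data.channels, data.ale_entries);
	return 0;
}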
+
 static int __devinit cpsw_probe(struct platform_device *pdev)
 {
 	struct cpsw_platform_data	*data = pdev->dev.platform_data;
@@ -720,11 +875,6 @@
 	struct resource			*res;
 	int ret = 0, i, k = 0;
 
-	if (!data) {
-		pr_err("platform data missing\n");
-		return -ENODEV;
-	}
-
 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
 	if (!ndev) {
 		pr_err("error allocating net_device\n");
@@ -734,13 +884,19 @@
 	platform_set_drvdata(pdev, ndev);
 	priv = netdev_priv(ndev);
 	spin_lock_init(&priv->lock);
-	priv->data = *data;
 	priv->pdev = pdev;
 	priv->ndev = ndev;
 	priv->dev  = &ndev->dev;
 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 	priv->rx_packet_max = max(rx_packet_max, 128);
 
+	if (cpsw_probe_dt(&priv->data, pdev)) {
+		pr_err("cpsw: platform data missing\n");
+		ret = -ENODEV;
+		goto clean_ndev_ret;
+	}
+	data = &priv->data;
+
 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
 		pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1152,17 @@
 	.resume		= cpsw_resume,
 };
 
+static const struct of_device_id cpsw_of_mtable[] = {
+	{ .compatible = "ti,cpsw", },
+	{ /* sentinel */ },
+};
+
 static struct platform_driver cpsw_driver = {
 	.driver = {
 		.name	 = "cpsw",
 		.owner	 = THIS_MODULE,
 		.pm	 = &cpsw_pm_ops,
+		.of_match_table = of_match_ptr(cpsw_of_mtable),
 	},
 	.probe = cpsw_probe,
 	.remove = __devexit_p(cpsw_remove),
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3b5c457..d15c888 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -538,11 +538,12 @@
 
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
-	struct cpdma_ctlr *ctlr = chan->ctlr;
+	struct cpdma_ctlr *ctlr;
 	unsigned long flags;
 
 	if (!chan)
 		return -EINVAL;
+	ctlr = chan->ctlr;
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 	if (chan->state != CPDMA_STATE_IDLE)
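
The cpdma_chan_destroy() hunk exists because the old initializer dereferenced chan before the NULL check. A trivial standalone illustration of the corrected ordering (the names are made up):

#include <stdio.h>
#include <stddef.h>

struct fake_chan {
	int id;
};

static int fake_destroy(struct fake_chan *chan)
{
	struct fake_chan *owner;

	if (!chan)
		return -1;
	owner = chan;		/* only dereferenced after the NULL check */
	printf("destroying channel %d\n", owner->id);
	return 0;
}

int main(void)
{
	struct fake_chan c = { 7 };

	fake_destroy(NULL);	/* rejected instead of crashing */
	fake_destroy(&c);
	return 0;
}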
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cd7ee20..573f3be 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -36,6 +36,8 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@
 	return 0;
 }
 
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+			 struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	u32 prop;
+
+	if (!node)
+		return -EINVAL;
+
+	if (of_property_read_u32(node, "bus_freq", &prop)) {
+		pr_err("Missing bus_freq property in the DT.\n");
+		return -EINVAL;
+	}
+	data->bus_freq = prop;
+
+	return 0;
+}
+
+
 static int __devinit davinci_mdio_probe(struct platform_device *pdev)
 {
 	struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@
 		return -ENOMEM;
 	}
 
-	data->pdata = pdata ? (*pdata) : default_pdata;
-
 	data->bus = mdiobus_alloc();
 	if (!data->bus) {
 		dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@
 		goto bail_out;
 	}
 
+	if (dev->of_node) {
+		if (davinci_mdio_probe_dt(&data->pdata, pdev))
+			data->pdata = default_pdata;
+		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+	} else {
+		data->pdata = pdata ? (*pdata) : default_pdata;
+		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+			 pdev->name, pdev->id);
+	}
+
 	data->bus->name		= dev_name(dev);
 	data->bus->read		= davinci_mdio_read,
 	data->bus->write	= davinci_mdio_write,
 	data->bus->reset	= davinci_mdio_reset,
 	data->bus->parent	= dev;
 	data->bus->priv		= data;
-	snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		pdev->name, pdev->id);
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
@@ -454,11 +481,17 @@
 	.resume		= davinci_mdio_resume,
 };
 
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+	{ .compatible = "ti,davinci_mdio", },
+	{ /* sentinel */ },
+};
+
 static struct platform_driver davinci_mdio_driver = {
 	.driver = {
 		.name	 = "davinci_mdio",
 		.owner	 = THIS_MODULE,
 		.pm	 = &davinci_mdio_pm_ops,
+		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
 	},
 	.probe = davinci_mdio_probe,
 	.remove = __devexit_p(davinci_mdio_remove),
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a5826a3..2c08bf6 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,8 +637,7 @@
 	if (data && is_valid_ether_addr(data->mac_addr)) {
 		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
 	} else {
-		eth_random_addr(ndev->dev_addr);
-		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+		eth_hw_addr_random(ndev);
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index bdd8891..88943d9 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,8 +557,7 @@
 	if (data && is_valid_ether_addr(data->mac_addr)) {
 		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
 	} else {
-		eth_random_addr(ndev->dev_addr);
-		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+		eth_hw_addr_random(ndev);
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648f..98934bd 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@
 }
 
 int ixp46x_phc_index = -1;
+EXPORT_SYMBOL_GPL(ixp46x_phc_index);
 
 static int ixp4xx_get_ts_info(struct net_device *dev,
 			      struct ethtool_ts_info *info)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee291..4a1a5f5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@
 	unsigned long flags;
 
 	net_device = hv_get_drvdata(device);
-	spin_lock_irqsave(&device->channel->inbound_lock, flags);
-	net_device->destroy = true;
-	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
-	/* Wait for all send completions */
-	wait_event(net_device->wait_drain,
-		   atomic_read(&net_device->num_outstanding_sends) == 0);
 
 	netvsc_disconnect_vsp(net_device);
 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8c5a1c4..e91111a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -400,7 +400,7 @@
 	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
 	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
 	net = net_device->ndev;
-	netif_notify_peers(net);
+	netdev_notify_peers(net);
 }
 
 
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e5d6146..06f8601 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -46,8 +46,14 @@
 	/* Simplify allocation by having a netvsc packet inline */
 	struct hv_netvsc_packet	pkt;
 	struct hv_page_buffer buf;
-	/* FIXME: We assumed a fixed size request here. */
+
 	struct rndis_message request_msg;
+	/*
+	 * The buffer for the extended info after the RNDIS message. It's
+	 * referenced based on the data offset in the RNDIS message. Its size
+	 * is enough for current needs, and should be sufficient for the near
+	 * future.
+	 */
 	u8 ext[100];
 };
 
@@ -718,6 +724,9 @@
 {
 	struct rndis_request *request;
 	struct rndis_halt_request *halt;
+	struct netvsc_device *nvdev = dev->net_dev;
+	struct hv_device *hdev = nvdev->dev;
+	ulong flags;
 
 	/* Attempt to do a rndis device halt */
 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +744,14 @@
 	dev->state = RNDIS_DEV_UNINITIALIZED;
 
 cleanup:
+	spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
+	nvdev->destroy = true;
+	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+	/* Wait for all send completions */
+	wait_event(nvdev->wait_drain,
+		atomic_read(&nvdev->num_outstanding_sends) == 0);
+
 	if (request)
 		put_rndis_request(dev, request);
 	return;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a561ae4..c6a0299 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -158,7 +158,7 @@
 	/* If we do not set 'RPOLC', we can't catch the receive interrupt.
 	 * This is related to the HW layout and the IR transceiver.
 	 */
-	val |= IREN | RPOLC;
+	val |= UMOD_IRDA | RPOLC;
 	UART_PUT_GCTL(port, val);
 	return ret;
 }
@@ -432,7 +432,7 @@
 	bfin_sir_stop_rx(port);
 
 	val = UART_GET_GCTL(port);
-	val &= ~(UCEN | IREN | RPOLC);
+	val &= ~(UCEN | UMOD_MASK | RPOLC);
 	UART_PUT_GCTL(port, val);
 
 #ifdef CONFIG_SIR_BFIN_DMA
@@ -518,10 +518,10 @@
 	 * reset all the UART.
 	 */
 	val = UART_GET_GCTL(port);
-	val &= ~(IREN | RPOLC);
+	val &= ~(UMOD_MASK | RPOLC);
 	UART_PUT_GCTL(port, val);
 	SSYNC();
-	val |= IREN | RPOLC;
+	val |= UMOD_IRDA | RPOLC;
 	UART_PUT_GCTL(port, val);
 	SSYNC();
 	/* bfin_sir_set_speed(port, self->speed); */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 824e2a9..5f3aeac 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -542,6 +542,7 @@
 	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
 	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
 	if (!kingsun->irlap) {
+		err = -ENOMEM;
 		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
 		goto free_mem;
 	}
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5a278ab..2d4b6a1 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -436,6 +436,7 @@
 	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
 	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
 	if (!kingsun->irlap) {
+		err = -ENOMEM;
 		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
 		goto free_mem;
 	}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index e2a06fd..4a075ba 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -197,6 +197,7 @@
 	if (err)
 		goto out_free_netdev;
 
+	BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
 	net->loopback_dev = dev;
 	return 0;
 
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0737bd4..0f0f9ce 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -94,7 +94,8 @@
 	int i;
 
 	for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
-		if (rcu_dereference(vlan->taps[i]) == q)
+		if (rcu_dereference_protected(vlan->taps[i],
+					      lockdep_is_held(&macvtap_lock)) == q)
 			return i;
 	}
 
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index f9347ea..b332112 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -640,15 +640,9 @@
 				 * rtnl_lock already held
 				 */
 				if (nt->np.dev) {
-					spin_unlock_irqrestore(
-							      &target_list_lock,
-							      flags);
 					__netpoll_cleanup(&nt->np);
-					spin_lock_irqsave(&target_list_lock,
-							  flags);
 					dev_put(nt->np.dev);
 					nt->np.dev = NULL;
-					netconsole_target_put(nt);
 				}
 				nt->enabled = 0;
 				stopped = true;
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef..eefe49e 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@
 		n--;
 		gpio_free(s->gpio[n]);
 	}
-	devm_kfree(&pdev->dev, s);
 	return r;
 }
 
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c12018..4d4d25e 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -132,7 +132,7 @@
 	pb->mii_bus = parent_bus;
 
 	ret_val = -ENODEV;
-	for_each_child_of_node(dev->of_node, child_bus_node) {
+	for_each_available_child_of_node(dev->of_node, child_bus_node) {
 		u32 v;
 
 		r = of_property_read_u32(child_bus_node, "reg", &v);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7ca2ff9..ef9ea92 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1035,66 +1035,6 @@
 	bus->write(bus, addr, MII_MMD_DATA, data);
 }
 
-static u32 phy_eee_to_adv(u16 eee_adv)
-{
-	u32 adv = 0;
-
-	if (eee_adv & MDIO_EEE_100TX)
-		adv |= ADVERTISED_100baseT_Full;
-	if (eee_adv & MDIO_EEE_1000T)
-		adv |= ADVERTISED_1000baseT_Full;
-	if (eee_adv & MDIO_EEE_10GT)
-		adv |= ADVERTISED_10000baseT_Full;
-	if (eee_adv & MDIO_EEE_1000KX)
-		adv |= ADVERTISED_1000baseKX_Full;
-	if (eee_adv & MDIO_EEE_10GKX4)
-		adv |= ADVERTISED_10000baseKX4_Full;
-	if (eee_adv & MDIO_EEE_10GKR)
-		adv |= ADVERTISED_10000baseKR_Full;
-
-	return adv;
-}
-
-static u32 phy_eee_to_supported(u16 eee_caported)
-{
-	u32 supported = 0;
-
-	if (eee_caported & MDIO_EEE_100TX)
-		supported |= SUPPORTED_100baseT_Full;
-	if (eee_caported & MDIO_EEE_1000T)
-		supported |= SUPPORTED_1000baseT_Full;
-	if (eee_caported & MDIO_EEE_10GT)
-		supported |= SUPPORTED_10000baseT_Full;
-	if (eee_caported & MDIO_EEE_1000KX)
-		supported |= SUPPORTED_1000baseKX_Full;
-	if (eee_caported & MDIO_EEE_10GKX4)
-		supported |= SUPPORTED_10000baseKX4_Full;
-	if (eee_caported & MDIO_EEE_10GKR)
-		supported |= SUPPORTED_10000baseKR_Full;
-
-	return supported;
-}
-
-static u16 phy_adv_to_eee(u32 adv)
-{
-	u16 reg = 0;
-
-	if (adv & ADVERTISED_100baseT_Full)
-		reg |= MDIO_EEE_100TX;
-	if (adv & ADVERTISED_1000baseT_Full)
-		reg |= MDIO_EEE_1000T;
-	if (adv & ADVERTISED_10000baseT_Full)
-		reg |= MDIO_EEE_10GT;
-	if (adv & ADVERTISED_1000baseKX_Full)
-		reg |= MDIO_EEE_1000KX;
-	if (adv & ADVERTISED_10000baseKX4_Full)
-		reg |= MDIO_EEE_10GKX4;
-	if (adv & ADVERTISED_10000baseKR_Full)
-		reg |= MDIO_EEE_10GKR;
-
-	return reg;
-}
-
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@
 		if (eee_cap < 0)
 			return eee_cap;
 
-		cap = phy_eee_to_supported(eee_cap);
+		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
 		if (!cap)
 			goto eee_exit;
 
@@ -1149,8 +1089,8 @@
 		if (eee_adv < 0)
 			return eee_adv;
 
-		adv = phy_eee_to_adv(eee_adv);
-		lp = phy_eee_to_adv(eee_lp);
+		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
 		idx = phy_find_setting(phydev->speed, phydev->duplex);
 		if ((lp & adv & settings[idx].setting))
 			goto eee_exit;
@@ -1210,21 +1150,21 @@
 				    MDIO_MMD_PCS, phydev->addr);
 	if (val < 0)
 		return val;
-	data->supported = phy_eee_to_supported(val);
+	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
 	/* Get advertisement EEE */
 	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
 				    MDIO_MMD_AN, phydev->addr);
 	if (val < 0)
 		return val;
-	data->advertised = phy_eee_to_adv(val);
+	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
 	/* Get LP advertisement EEE */
 	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
 				    MDIO_MMD_AN, phydev->addr);
 	if (val < 0)
 		return val;
-	data->lp_advertised = phy_eee_to_adv(val);
+	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
 	return 0;
 }
@@ -1241,7 +1181,7 @@
 {
 	int val;
 
-	val = phy_adv_to_eee(data->advertised);
+	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
 			       phydev->addr, val);
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5c05572..eb3f5ce 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -94,6 +94,18 @@
 #define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
 
 /*
+ * Data structure to hold primary network stats for which
+ * we want to use 64 bit storage.  Other network stats
+ * are stored in dev->stats of the ppp structure.
+ */
+struct ppp_link_stats {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+};
+
+/*
  * Data structure describing one ppp unit.
  * A ppp unit corresponds to a ppp network interface device
  * and represents a multilink bundle.
@@ -136,6 +148,7 @@
 	unsigned pass_len, active_len;
 #endif /* CONFIG_PPP_FILTER */
 	struct net	*ppp_net;	/* the net we belong to */
+	struct ppp_link_stats stats64;	/* 64 bit network stats */
 };
 
 /*
@@ -1021,9 +1034,34 @@
 	return err;
 }
 
+struct rtnl_link_stats64*
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	ppp_recv_lock(ppp);
+	stats64->rx_packets = ppp->stats64.rx_packets;
+	stats64->rx_bytes   = ppp->stats64.rx_bytes;
+	ppp_recv_unlock(ppp);
+
+	ppp_xmit_lock(ppp);
+	stats64->tx_packets = ppp->stats64.tx_packets;
+	stats64->tx_bytes   = ppp->stats64.tx_bytes;
+	ppp_xmit_unlock(ppp);
+
+	stats64->rx_errors        = dev->stats.rx_errors;
+	stats64->tx_errors        = dev->stats.tx_errors;
+	stats64->rx_dropped       = dev->stats.rx_dropped;
+	stats64->tx_dropped       = dev->stats.tx_dropped;
+	stats64->rx_length_errors = dev->stats.rx_length_errors;
+
+	return stats64;
+}
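
ppp_get_stats64() reads the receive counters under the receive lock and the transmit counters under the transmit lock, so neither path ever has to hold both at once. A standalone sketch of that split, with pthread mutexes standing in for ppp_recv_lock()/ppp_xmit_lock() and made-up counter values (build with -pthread):

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

struct fake_ppp {
	pthread_mutex_t rlock, wlock;	/* stand-ins for the recv/xmit locks */
	uint64_t rx_packets, rx_bytes;
	uint64_t tx_packets, tx_bytes;
};

static void get_stats64(struct fake_ppp *ppp, uint64_t out[4])
{
	pthread_mutex_lock(&ppp->rlock);	/* rx side only */
	out[0] = ppp->rx_packets;
	out[1] = ppp->rx_bytes;
	pthread_mutex_unlock(&ppp->rlock);

	pthread_mutex_lock(&ppp->wlock);	/* tx side only */
	out[2] = ppp->tx_packets;
	out[3] = ppp->tx_bytes;
	pthread_mutex_unlock(&ppp->wlock);
}

int main(void)
{
	struct fake_ppp ppp = { .rx_packets = 10, .rx_bytes = 15000,
				.tx_packets = 20, .tx_bytes = 30000 };
	uint64_t s[4];

	pthread_mutex_init(&ppp.rlock, NULL);
	pthread_mutex_init(&ppp.wlock, NULL);
	get_stats64(&ppp, s);
	printf("rx %llu pkts / %llu bytes, tx %llu pkts / %llu bytes\n",
	       (unsigned long long)s[0], (unsigned long long)s[1],
	       (unsigned long long)s[2], (unsigned long long)s[3]);
	return 0;
}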
+
 static const struct net_device_ops ppp_netdev_ops = {
-	.ndo_start_xmit = ppp_start_xmit,
-	.ndo_do_ioctl   = ppp_net_ioctl,
+	.ndo_start_xmit  = ppp_start_xmit,
+	.ndo_do_ioctl    = ppp_net_ioctl,
+	.ndo_get_stats64 = ppp_get_stats64,
 };
 
 static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@
 #endif /* CONFIG_PPP_FILTER */
 	}
 
-	++ppp->dev->stats.tx_packets;
-	ppp->dev->stats.tx_bytes += skb->len - 2;
+	++ppp->stats64.tx_packets;
+	ppp->stats64.tx_bytes += skb->len - 2;
 
 	switch (proto) {
 	case PPP_IP:
@@ -1745,8 +1783,8 @@
 		break;
 	}
 
-	++ppp->dev->stats.rx_packets;
-	ppp->dev->stats.rx_bytes += skb->len - 2;
+	++ppp->stats64.rx_packets;
+	ppp->stats64.rx_bytes += skb->len - 2;
 
 	npi = proto_to_npindex(proto);
 	if (npi < 0) {
@@ -2570,12 +2608,12 @@
 	struct slcompress *vj = ppp->vj;
 
 	memset(st, 0, sizeof(*st));
-	st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+	st->p.ppp_ipackets = ppp->stats64.rx_packets;
 	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
-	st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
-	st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+	st->p.ppp_opackets = ppp->stats64.tx_packets;
 	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
-	st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+	st->p.ppp_obytes = ppp->stats64.tx_bytes;
 	if (!vj)
 		return;
 	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1c98321..162464f 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -189,7 +189,7 @@
 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
 		goto tx_error;
 
-	rt = ip_route_output_ports(&init_net, &fl4, NULL,
+	rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@
 	po->chan.private = sk;
 	po->chan.ops = &pptp_chan_ops;
 
-	rt = ip_route_output_ports(&init_net, &fl4, sk,
+	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0,
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6a7260b..6b08bd4 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -21,7 +21,7 @@
 	---help---
 	  Basic mode where packets are transmitted always by all suitable ports.
 
-	  All added ports are setup to have team's mac address.
+	  All added ports are setup to have team's device address.
 
 	  To compile this team mode as a module, choose M here: the module
 	  will be called team_mode_broadcast.
@@ -33,7 +33,7 @@
 	  Basic mode where port used for transmitting packets is selected in
 	  round-robin fashion using packet counter.
 
-	  All added ports are setup to have team's mac address.
+	  All added ports are setup to have team's device address.
 
 	  To compile this team mode as a module, choose M here: the module
 	  will be called team_mode_roundrobin.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 87707ab..b4f67b5 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -54,29 +54,29 @@
 }
 
 /*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change device address for open port device is tested in
  * team_port_add, this function can be called without checking the return value
  */
-static int __set_port_mac(struct net_device *port_dev,
-			  const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+			       const unsigned char *dev_addr)
 {
 	struct sockaddr addr;
 
-	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
-	addr.sa_family = ARPHRD_ETHER;
+	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+	addr.sa_family = port_dev->type;
 	return dev_set_mac_address(port_dev, &addr);
 }
 
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
 {
-	return __set_port_mac(port->dev, port->orig.dev_addr);
+	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 }
 
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
 {
-	return __set_port_mac(port->dev, port->team->dev->dev_addr);
+	return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
 }
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
 
 static void team_refresh_port_linkup(struct team_port *port)
 {
@@ -658,6 +658,122 @@
 }
 
 
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+	struct list_head *listarr;
+	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+	unsigned int i;
+
+	if (!queue_cnt)
+		return 0;
+	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+	if (!listarr)
+		return -ENOMEM;
+	team->qom_lists = listarr;
+	for (i = 0; i < queue_cnt; i++)
+		INIT_LIST_HEAD(listarr++);
+	return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+	kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+	return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct list_head *qom_list;
+	struct team_port *port;
+
+	if (!team->queue_override_enabled || !skb->queue_mapping)
+		return false;
+	qom_list = __team_get_qom_list(team, skb->queue_mapping);
+	list_for_each_entry_rcu(port, qom_list, qom_list) {
+		if (!team_dev_queue_xmit(team, port, skb))
+			return true;
+	}
+	return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+					   struct team_port *port)
+{
+	list_del_rcu(&port->qom_list);
+	synchronize_rcu();
+	INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+						      struct team_port *cur)
+{
+	if (port->priority < cur->priority)
+		return true;
+	if (port->priority > cur->priority)
+		return false;
+	if (port->index < cur->index)
+		return true;
+	return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+					   struct team_port *port)
+{
+	struct team_port *cur;
+	struct list_head *qom_list;
+	struct list_head *node;
+
+	if (!port->queue_id || !team_port_enabled(port))
+		return;
+
+	qom_list = __team_get_qom_list(team, port->queue_id);
+	node = qom_list;
+	list_for_each_entry(cur, qom_list, qom_list) {
+		if (team_queue_override_port_has_gt_prio_than(port, cur))
+			break;
+		node = &cur->qom_list;
+	}
+	list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+	struct team_port *port;
+	bool enabled = false;
+
+	list_for_each_entry(port, &team->port_list, list) {
+		if (!list_empty(&port->qom_list)) {
+			enabled = true;
+			break;
+		}
+	}
+	if (enabled == team->queue_override_enabled)
+		return;
+	netdev_dbg(team->dev, "%s queue override\n",
+		   enabled ? "Enabling" : "Disabling");
+	team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+					     struct team_port *port)
+{
+	__team_queue_override_port_del(team, port);
+	__team_queue_override_port_add(team, port);
+	__team_queue_override_enabled_check(team);
+}
+
+
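
The queue-override helpers above keep each per-queue list ordered by the comparison in team_queue_override_port_has_gt_prio_than(): a numerically lower priority value sorts first, with ties broken by the lower port index. A standalone sketch of that ordering; the port names and values are invented, and an insertion sort stands in for the kernel's ordered list_add_tail_rcu() walk:

#include <stdio.h>

struct fake_port {
	const char *name;
	int priority;
	int index;
};

static int has_gt_prio_than(const struct fake_port *a, const struct fake_port *b)
{
	if (a->priority != b->priority)
		return a->priority < b->priority;
	return a->index < b->index;
}

int main(void)
{
	struct fake_port ports[] = {
		{ "eth1", 10, 1 }, { "eth0", -5, 0 }, { "eth2", 10, 2 },
	};
	int order[3] = { 0, 1, 2 };
	int i, j;

	/* Insertion sort using the same comparison as the kernel list walk. */
	for (i = 1; i < 3; i++)
		for (j = i; j > 0 &&
		     has_gt_prio_than(&ports[order[j]], &ports[order[j - 1]]); j--) {
			int tmp = order[j];
			order[j] = order[j - 1];
			order[j - 1] = tmp;
		}

	for (i = 0; i < 3; i++)
		printf("%d: %s (prio %d)\n", i, ports[order[i]].name,
		       ports[order[i]].priority);
	return 0;
}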
 /****************
  * Port handling
  ****************/
@@ -688,6 +804,7 @@
 	hlist_add_head_rcu(&port->hlist,
 			   team_port_index_hash(team, port->index));
 	team_adjust_ops(team);
+	team_queue_override_port_refresh(team, port);
 	if (team->ops.port_enabled)
 		team->ops.port_enabled(team, port);
 }
@@ -716,6 +833,7 @@
 	hlist_del_rcu(&port->hlist);
 	__reconstruct_port_hlist(team, port->index);
 	port->index = -1;
+	team_queue_override_port_refresh(team, port);
 	__team_adjust_ops(team, team->en_port_count - 1);
 	/*
 	 * Wait until readers see adjusted ops. This ensures that
@@ -795,16 +913,17 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port,
+				    gfp_t gfp)
 {
 	struct netpoll *np;
 	int err;
 
-	np = kzalloc(sizeof(*np), GFP_KERNEL);
+	np = kzalloc(sizeof(*np), gfp);
 	if (!np)
 		return -ENOMEM;
 
-	err = __netpoll_setup(np, port->dev);
+	err = __netpoll_setup(np, port->dev, gfp);
 	if (err) {
 		kfree(np);
 		return err;
@@ -833,7 +952,8 @@
 }
 
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port,
+				    gfp_t gfp)
 {
 	return 0;
 }
@@ -847,6 +967,8 @@
 #endif
 
 static void __team_port_change_check(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+				      struct net_device *port_dev);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -855,9 +977,8 @@
 	char *portname = port_dev->name;
 	int err;
 
-	if (port_dev->flags & IFF_LOOPBACK ||
-	    port_dev->type != ARPHRD_ETHER) {
-		netdev_err(dev, "Device %s is of an unsupported type\n",
+	if (port_dev->flags & IFF_LOOPBACK) {
+		netdev_err(dev, "Device %s is a loopback device. Loopback devices can't be added as a team port\n",
 			   portname);
 		return -EINVAL;
 	}
@@ -868,6 +989,17 @@
 		return -EBUSY;
 	}
 
+	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+	    vlan_uses_dev(dev)) {
+		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+			   portname);
+		return -EPERM;
+	}
+
+	err = team_dev_type_check_change(dev, port_dev);
+	if (err)
+		return err;
+
 	if (port_dev->flags & IFF_UP) {
 		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
 			   portname);
@@ -881,6 +1013,7 @@
 
 	port->dev = port_dev;
 	port->team = team;
+	INIT_LIST_HEAD(&port->qom_list);
 
 	port->orig.mtu = port_dev->mtu;
 	err = dev_set_mtu(port_dev, dev->mtu);
@@ -889,7 +1022,7 @@
 		goto err_set_mtu;
 	}
 
-	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
 
 	err = team_port_enter(team, port);
 	if (err) {
@@ -913,7 +1046,7 @@
 	}
 
 	if (team_netpoll_info(team)) {
-		err = team_port_enable_netpoll(team, port);
+		err = team_port_enable_netpoll(team, port, GFP_KERNEL);
 		if (err) {
 			netdev_err(dev, "Failed to enable netpoll on device %s\n",
 				   portname);
@@ -970,7 +1103,7 @@
 
 err_dev_open:
 	team_port_leave(team, port);
-	team_port_set_orig_mac(port);
+	team_port_set_orig_dev_addr(port);
 
 err_port_enter:
 	dev_set_mtu(port_dev, port->orig.mtu);
@@ -1007,7 +1140,7 @@
 	vlan_vids_del_by_dev(port_dev, dev);
 	dev_close(port_dev);
 	team_port_leave(team, port);
-	team_port_set_orig_mac(port);
+	team_port_set_orig_dev_addr(port);
 	dev_set_mtu(port_dev, port->orig.mtu);
 	synchronize_rcu();
 	kfree(port);
@@ -1092,6 +1225,49 @@
 	return 0;
 }
 
+static int team_priority_option_get(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	ctx->data.s32_val = port->priority;
+	return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	port->priority = ctx->data.s32_val;
+	team_queue_override_port_refresh(team, port);
+	return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	ctx->data.u32_val = port->queue_id;
+	return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+				    struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->info->port;
+
+	if (port->queue_id == ctx->data.u32_val)
+		return 0;
+	if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+		return -EINVAL;
+	port->queue_id = ctx->data.u32_val;
+	team_queue_override_port_refresh(team, port);
+	return 0;
+}
+
+
 static const struct team_option team_options[] = {
 	{
 		.name = "mode",
@@ -1120,6 +1296,20 @@
 		.getter = team_user_linkup_en_option_get,
 		.setter = team_user_linkup_en_option_set,
 	},
+	{
+		.name = "priority",
+		.type = TEAM_OPTION_TYPE_S32,
+		.per_port = true,
+		.getter = team_priority_option_get,
+		.setter = team_priority_option_set,
+	},
+	{
+		.name = "queue_id",
+		.type = TEAM_OPTION_TYPE_U32,
+		.per_port = true,
+		.getter = team_queue_id_option_get,
+		.setter = team_queue_id_option_set,
+	},
 };
 
 static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1155,6 +1345,9 @@
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
 		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);
+	err = team_queue_override_init(team);
+	if (err)
+		goto err_team_queue_override_init;
 
 	team_adjust_ops(team);
 
@@ -1170,6 +1363,8 @@
 	return 0;
 
 err_options_register:
+	team_queue_override_fini(team);
+err_team_queue_override_init:
 	free_percpu(team->pcpu_stats);
 
 	return err;
@@ -1187,6 +1382,7 @@
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
 }
 
@@ -1216,10 +1412,12 @@
 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct team *team = netdev_priv(dev);
-	bool tx_success = false;
+	bool tx_success;
 	unsigned int len = skb->len;
 
-	tx_success = team->ops.transmit(team, skb);
+	tx_success = team_queue_override_transmit(team, skb);
+	if (!tx_success)
+		tx_success = team->ops.transmit(team, skb);
 	if (tx_success) {
 		struct team_pcpu_stats *pcpu_stats;
 
@@ -1293,17 +1491,18 @@
 
 static int team_set_mac_address(struct net_device *dev, void *p)
 {
+	struct sockaddr *addr = p;
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
-	int err;
 
-	err = eth_mac_addr(dev, p);
-	if (err)
-		return err;
+	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
 	rcu_read_lock();
 	list_for_each_entry_rcu(port, &team->port_list, list)
-		if (team->ops.port_change_mac)
-			team->ops.port_change_mac(team, port);
+		if (team->ops.port_change_dev_addr)
+			team->ops.port_change_dev_addr(team, port);
 	rcu_read_unlock();
 	return 0;
 }
@@ -1443,7 +1642,7 @@
 }
 
 static int team_netpoll_setup(struct net_device *dev,
-			      struct netpoll_info *npifo)
+			      struct netpoll_info *npifo, gfp_t gfp)
 {
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
@@ -1451,7 +1650,7 @@
 
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port);
+		err = team_port_enable_netpoll(team, port, gfp);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
@@ -1534,6 +1733,45 @@
  * rt netlink interface
  ***********************/
 
+static void team_setup_by_port(struct net_device *dev,
+			       struct net_device *port_dev)
+{
+	dev->header_ops	= port_dev->header_ops;
+	dev->type = port_dev->type;
+	dev->hard_header_len = port_dev->hard_header_len;
+	dev->addr_len = port_dev->addr_len;
+	dev->mtu = port_dev->mtu;
+	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+	memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+				      struct net_device *port_dev)
+{
+	struct team *team = netdev_priv(dev);
+	char *portname = port_dev->name;
+	int err;
+
+	if (dev->type == port_dev->type)
+		return 0;
+	if (!list_empty(&team->port_list)) {
+		netdev_err(dev, "Device %s is of different type\n", portname);
+		return -EBUSY;
+	}
+	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+	err = notifier_to_errno(err);
+	if (err) {
+		netdev_err(dev, "Refused to change device type\n");
+		return err;
+	}
+	dev_uc_flush(dev);
+	dev_mc_flush(dev);
+	team_setup_by_port(dev, port_dev);
+	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+	return 0;
+}
+
 static void team_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -1787,6 +2025,12 @@
 		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
 			goto nest_cancel;
 		break;
+	case TEAM_OPTION_TYPE_S32:
+		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+			goto nest_cancel;
+		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+			goto nest_cancel;
+		break;
 	default:
 		BUG();
 	}
@@ -1975,6 +2219,9 @@
 		case NLA_FLAG:
 			opt_type = TEAM_OPTION_TYPE_BOOL;
 			break;
+		case NLA_S32:
+			opt_type = TEAM_OPTION_TYPE_S32;
+			break;
 		default:
 			goto team_put;
 		}
@@ -2031,6 +2278,9 @@
 			case TEAM_OPTION_TYPE_BOOL:
 				ctx.data.bool_val = attr_data ? true : false;
 				break;
+			case TEAM_OPTION_TYPE_S32:
+				ctx.data.s32_val = nla_get_s32(attr_data);
+				break;
 			default:
 				BUG();
 			}
@@ -2243,7 +2493,7 @@
 			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
 	}
 	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
-	if (err)
+	if (err && err != -ESRCH)
 		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
 			    err);
 }
@@ -2274,9 +2524,9 @@
 
 send_event:
 	err = team_nl_send_event_port_list_get(port->team);
-	if (err)
-		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
-			    port->dev->name);
+	if (err && err != -ESRCH)
+		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+			    port->dev->name, err);
 
 }
 
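The team.c hunks above add per-port "priority" and "queue_id" options and have team_xmit() try a queue-override path first, falling back to team->ops.transmit() only when no override applies. A minimal userspace sketch of that idea follows; the struct, names and the "queue_id 0 means no override" convention are illustrative stand-ins, not the driver's types.

	/*
	 * Minimal sketch (not kernel code) of the queue-override idea added
	 * above: ports carry a priority and a bound queue_id, ports bound to
	 * a queue are consulted in priority order before the mode's normal
	 * transmit policy.  All names here are hypothetical.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct port {
		const char *name;
		int priority;		/* higher wins */
		unsigned int queue_id;	/* 0: no override for this port */
	};

	/* qsort comparator: descending priority */
	static int cmp_prio(const void *a, const void *b)
	{
		const struct port *pa = a, *pb = b;

		return pb->priority - pa->priority;
	}

	/* Return the highest-priority port overriding this queue, or NULL. */
	static const struct port *queue_override(struct port *ports, size_t n,
						 unsigned int queue)
	{
		qsort(ports, n, sizeof(*ports), cmp_prio);
		for (size_t i = 0; i < n; i++)
			if (ports[i].queue_id == queue)
				return &ports[i];
		return NULL;	/* caller falls back to the default policy */
	}

	int main(void)
	{
		struct port ports[] = {
			{ "eth0",  0, 0 },	/* no override */
			{ "eth1", 10, 2 },
			{ "eth2",  5, 2 },
		};
		const struct port *p = queue_override(ports, 3, 2);

		printf("queue 2 -> %s\n", p ? p->name : "default policy");
		return 0;
	}

With two ports bound to queue 2, the higher-priority one wins, which is the behaviour the new "priority" option is meant to expose.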
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c96e4d2..9db0171 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -48,18 +48,18 @@
 
 static int bc_port_enter(struct team *team, struct team_port *port)
 {
-	return team_port_set_team_mac(port);
+	return team_port_set_team_dev_addr(port);
 }
 
-static void bc_port_change_mac(struct team *team, struct team_port *port)
+static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-	team_port_set_team_mac(port);
+	team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops bc_mode_ops = {
 	.transmit		= bc_transmit,
 	.port_enter		= bc_port_enter,
-	.port_change_mac	= bc_port_change_mac,
+	.port_change_dev_addr	= bc_port_change_dev_addr,
 };
 
 static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index ad7ed0e..105135a 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -66,18 +66,18 @@
 
 static int rr_port_enter(struct team *team, struct team_port *port)
 {
-	return team_port_set_team_mac(port);
+	return team_port_set_team_dev_addr(port);
 }
 
-static void rr_port_change_mac(struct team *team, struct team_port *port)
+static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-	team_port_set_team_mac(port);
+	team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops rr_mode_ops = {
 	.transmit		= rr_transmit,
 	.port_enter		= rr_port_enter,
-	.port_change_mac	= rr_port_change_mac,
+	.port_change_dev_addr	= rr_port_change_dev_addr,
 };
 
 static const struct team_mode rr_mode = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a9bd9f3..498dc0d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,7 +187,6 @@
 	netif_tx_lock_bh(tun->dev);
 	netif_carrier_off(tun->dev);
 	tun->tfile = NULL;
-	tun->socket.file = NULL;
 	netif_tx_unlock_bh(tun->dev);
 
 	/* Drop read queue */
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 6461004..7d78669 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -232,6 +232,7 @@
 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
 		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+			usb_free_urb(req);
 			usbpn_close(dev);
 			return -ENOMEM;
 		}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f4ce5957..4cd582a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1225,6 +1225,26 @@
 	  .driver_info = (unsigned long) &wwan_info,
 	},
 
+	/* Dell branded MBM devices like DW5550 */
+	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+		| USB_DEVICE_ID_MATCH_VENDOR,
+	  .idVendor = 0x413c,
+	  .bInterfaceClass = USB_CLASS_COMM,
+	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+	  .driver_info = (unsigned long) &wwan_info,
+	},
+
+	/* Toshiba branded MBM devices */
+	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+		| USB_DEVICE_ID_MATCH_VENDOR,
+	  .idVendor = 0x0930,
+	  .bInterfaceClass = USB_CLASS_COMM,
+	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+	  .driver_info = (unsigned long) &wwan_info,
+	},
+
 	/* Generic CDC-NCM devices */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
 		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2ea126a..328397c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -247,30 +247,12 @@
  */
 static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
 {
-	int rv;
 	struct qmi_wwan_state *info = (void *)&dev->data;
 
-	/* ZTE makes devices where the interface descriptors and endpoint
-	 * configurations of two or more interfaces are identical, even
-	 * though the functions are completely different.  If set, then
-	 * driver_info->data is a bitmap of acceptable interface numbers
-	 * allowing us to bind to one such interface without binding to
-	 * all of them
-	 */
-	if (dev->driver_info->data &&
-	    !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) {
-		dev_info(&intf->dev, "not on our whitelist - ignored");
-		rv = -ENODEV;
-		goto err;
-	}
-
 	/*  control and data is shared */
 	info->control = intf;
 	info->data = intf;
-	rv = qmi_wwan_register_subdriver(dev);
-
-err:
-	return rv;
+	return qmi_wwan_register_subdriver(dev);
 }
 
 static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -356,214 +338,64 @@
 	.manage_power	= qmi_wwan_manage_power,
 };
 
-static const struct driver_info	qmi_wwan_force_int0 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(0), /* interface whitelist bitmap */
-};
-
-static const struct driver_info	qmi_wwan_force_int1 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(1), /* interface whitelist bitmap */
-};
-
-static const struct driver_info qmi_wwan_force_int2 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(2), /* interface whitelist bitmap */
-};
-
-static const struct driver_info	qmi_wwan_force_int3 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(3), /* interface whitelist bitmap */
-};
-
-static const struct driver_info	qmi_wwan_force_int4 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(4), /* interface whitelist bitmap */
-};
-
-/* Sierra Wireless provide equally useless interface descriptors
- * Devices in QMI mode can be switched between two different
- * configurations:
- *   a) USB interface #8 is QMI/wwan
- *   b) USB interfaces #8, #19 and #20 are QMI/wwan
- *
- * Both configurations provide a number of other interfaces (serial++),
- * some of which have the same endpoint configuration as we expect, so
- * a whitelist or blacklist is necessary.
- *
- * FIXME: The below whitelist should include BIT(20).  It does not
- * because I cannot get it to work...
- */
-static const struct driver_info	qmi_wwan_sierra = {
-	.description	= "Sierra Wireless wwan/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */
-};
-
 #define HUAWEI_VENDOR_ID	0x12D1
 
+/* map QMI/wwan function by a fixed interface number */
+#define QMI_FIXED_INTF(vend, prod, num) \
+	USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
+	.driver_info = (unsigned long)&qmi_wwan_shared
+
 /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
 #define QMI_GOBI1K_DEVICE(vend, prod) \
-	USB_DEVICE(vend, prod), \
-	.driver_info = (unsigned long)&qmi_wwan_force_int3
+	QMI_FIXED_INTF(vend, prod, 3)
 
-/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
+/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */
 #define QMI_GOBI_DEVICE(vend, prod) \
-	USB_DEVICE(vend, prod), \
-	.driver_info = (unsigned long)&qmi_wwan_force_int0
+	QMI_FIXED_INTF(vend, prod, 0)
 
 static const struct usb_device_id products[] = {
+	/* 1. CDC ECM like devices match on the control interface */
 	{	/* Huawei E392, E398 and possibly others sharing both device id and more... */
-		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = HUAWEI_VENDOR_ID,
-		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
-		.bInterfaceSubClass = 1,
-		.bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
 	{	/* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
-		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = HUAWEI_VENDOR_ID,
-		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
-		.bInterfaceSubClass = 1,
-		.bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
-	{	/* Huawei E392, E398 and possibly others in "Windows mode"
-		 * using a combined control and data interface without any CDC
-		 * functional descriptors
-		 */
-		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = HUAWEI_VENDOR_ID,
-		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
-		.bInterfaceSubClass = 1,
-		.bInterfaceProtocol = 17,
+
+	/* 2. Combined interface devices matching on class+protocol */
+	{	/* Huawei E392, E398 and possibly others in "Windows mode" */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
 		.driver_info        = (unsigned long)&qmi_wwan_shared,
 	},
 	{	/* Pantech UML290 */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x106c,
-		.idProduct          = 0x3718,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xf0,
-		.bInterfaceProtocol = 0xff,
+		USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
 		.driver_info        = (unsigned long)&qmi_wwan_shared,
 	},
-	{	/* ZTE MF820D */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x0167,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE MF821D */
-		.match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x0326,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE (Vodafone) K3520-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x0055,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int1,
-	},
-	{	/* ZTE (Vodafone) K3565-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x0063,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE (Vodafone) K3570-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x1008,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE (Vodafone) K3571-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x1010,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE (Vodafone) K3765-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x2002,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE (Vodafone) K4505-Z */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x0104,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
-	},
-	{	/* ZTE MF60 */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x1402,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
-	},
-	{	/* Sierra Wireless MC77xx in QMI mode */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x1199,
-		.idProduct          = 0x68a2,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_sierra,
+	{	/* Pantech UML290 - newer firmware */
+		USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
+		.driver_info        = (unsigned long)&qmi_wwan_shared,
 	},
 
-	/* Gobi 1000 devices */
+	/* 3. Combined interface devices matching on interface number */
+	{QMI_FIXED_INTF(0x19d2, 0x0055, 1)},	/* ZTE (Vodafone) K3520-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x0063, 4)},	/* ZTE (Vodafone) K3565-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x0104, 4)},	/* ZTE (Vodafone) K4505-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x0167, 4)},	/* ZTE MF820D */
+	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */
+	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x1018, 3)},	/* ZTE (Vodafone) K5006-Z */
+	{QMI_FIXED_INTF(0x19d2, 0x1402, 2)},	/* ZTE MF60 */
+	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
+	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
+
+	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
 	{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
 	{QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */
@@ -579,7 +411,7 @@
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */
 
-	/* Gobi 2000 and 3000 devices */
+	/* 5. Gobi 2000 and 3000 devices */
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -589,6 +421,8 @@
 	{QMI_GOBI_DEVICE(0x05c6, 0x9265)},	/* Asus Gobi 2000 Modem device (VR305) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9235)},	/* Top Global Gobi 2000 Modem device (VR306) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9275)},	/* iRex Technologies Gobi 2000 Modem device (VR307) */
+	{QMI_GOBI_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */
+	{QMI_GOBI_DEVICE(0x1199, 0x68a9)},	/* Sierra Wireless Modem */
 	{QMI_GOBI_DEVICE(0x1199, 0x9001)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
 	{QMI_GOBI_DEVICE(0x1199, 0x9002)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
 	{QMI_GOBI_DEVICE(0x1199, 0x9003)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
@@ -600,11 +434,14 @@
 	{QMI_GOBI_DEVICE(0x1199, 0x9009)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
 	{QMI_GOBI_DEVICE(0x1199, 0x900a)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
 	{QMI_GOBI_DEVICE(0x1199, 0x9011)},	/* Sierra Wireless Gobi 2000 Modem device (MC8305) */
+	{QMI_FIXED_INTF(0x1199, 0x9011, 5)},	/* alternate interface number!? */
 	{QMI_GOBI_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9205)},	/* Gobi 2000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
 	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
+	{QMI_GOBI_DEVICE(0x1199, 0x901b)},	/* Sierra Wireless MC7770 */
+
 	{ }					/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
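The removed qmi_wwan_force_int* driver_info variants carried a bitmap of acceptable interface numbers that qmi_wwan_bind_shared() tested at runtime; the new QMI_FIXED_INTF() entries match on the interface number directly via USB_DEVICE_INTERFACE_NUMBER(), so the runtime check disappears and bind_shared shrinks to just registering the subdriver. A small illustrative sketch of the whitelist test the old code performed (plain C, hypothetical helper, not the driver's API):

	/*
	 * Sketch of the removed whitelist-bitmap check: driver_info->data
	 * held a bitmap of acceptable interface numbers, and an empty
	 * bitmap meant "accept any interface".
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool iface_whitelisted(unsigned long bitmap, unsigned int ifnum)
	{
		if (!bitmap)
			return true;
		return bitmap & (1UL << ifnum);
	}

	int main(void)
	{
		unsigned long sierra_whitelist = (1UL << 8) | (1UL << 19);

		printf("intf 8:  %d\n", iface_whitelisted(sierra_whitelist, 8));
		printf("intf 12: %d\n", iface_whitelisted(sierra_whitelist, 12));
		return 0;
	}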
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d75d1f5..7be49ea 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,15 +68,8 @@
  */
 #define SIERRA_NET_USBCTL_BUF_LEN	1024
 
-/* list of interface numbers - used for constructing interface lists */
-struct sierra_net_iface_info {
-	const u32 infolen;	/* number of interface numbers on list */
-	const u8  *ifaceinfo;	/* pointer to the array holding the numbers */
-};
-
 struct sierra_net_info_data {
 	u16 rx_urb_size;
-	struct sierra_net_iface_info whitelist;
 };
 
 /* Private data structure */
@@ -637,21 +630,6 @@
 	return usbnet_change_mtu(net, new_mtu);
 }
 
-static int is_whitelisted(const u8 ifnum,
-			const struct sierra_net_iface_info *whitelist)
-{
-	if (whitelist) {
-		const u8 *list = whitelist->ifaceinfo;
-		int i;
-
-		for (i = 0; i < whitelist->infolen; i++) {
-			if (list[i] == ifnum)
-				return 1;
-		}
-	}
-	return 0;
-}
-
 static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
 {
 	int result = 0;
@@ -706,11 +684,6 @@
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 
 	ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
-	/* We only accept certain interfaces */
-	if (!is_whitelisted(ifacenum, &data->whitelist)) {
-		dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
-		return -ENODEV;
-	}
 	numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
 	/* We have three endpoints, bulk in and out, and a status */
 	if (numendpoints != 3) {
@@ -945,13 +918,8 @@
 	return NULL;
 }
 
-static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
 static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
 	.rx_urb_size = 8 * 1024,
-	.whitelist = {
-		.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
-		.ifaceinfo = sierra_net_ifnum_list
-	}
 };
 
 static const struct driver_info sierra_net_info_direct_ip = {
@@ -965,15 +933,19 @@
 	.data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
+#define DIRECT_IP_DEVICE(vend, prod) \
+	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
+	.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
+	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \
+	.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
+	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \
+	.driver_info = (unsigned long)&sierra_net_info_direct_ip}
+
 static const struct usb_device_id products[] = {
-	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
-	{USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
-	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
-	{USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
-	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
-	{USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
-	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
+	DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+	DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+	DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+	DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
 
 	{}, /* last item */
 };
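DIRECT_IP_DEVICE() above expands each vendor/product pair into three table entries, one per Direct IP interface number (7, 10 and 11), which is what lets the removed sierra_net runtime whitelist go away. A small sketch of the same expand-to-several-entries pattern, using a hypothetical table layout:

	/*
	 * Sketch of expanding one macro invocation into several id-table
	 * entries, as DIRECT_IP_DEVICE() does above.  The struct and values
	 * are illustrative; only the expansion pattern matters.
	 */
	#include <stdio.h>

	struct id { unsigned short vendor, product, ifnum; };

	#define DIRECT_IP_IDS(vend, prod) \
		{ (vend), (prod), 7 },	  \
		{ (vend), (prod), 10 },	  \
		{ (vend), (prod), 11 }

	static const struct id ids[] = {
		DIRECT_IP_IDS(0x1199, 0x68A3),
		DIRECT_IP_IDS(0x0F3D, 0x68AA),
		{ 0 }	/* terminator */
	};

	int main(void)
	{
		for (const struct id *p = ids; p->vendor; p++)
			printf("%04x:%04x intf %u\n",
			       (unsigned int)p->vendor,
			       (unsigned int)p->product,
			       (unsigned int)p->ifnum);
		return 0;
	}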
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5852361..e522ff7 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -348,6 +348,9 @@
 	if (tbp[IFLA_ADDRESS] == NULL)
 		eth_hw_addr_random(peer);
 
+	if (ifmp && (dev->ifindex != 0))
+		peer->ifindex = ifmp->ifi_index;
+
 	err = register_netdevice(peer);
 	put_net(net);
 	net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c..81a64c5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -993,7 +993,7 @@
 		goto done;
 
 	if (v & VIRTIO_NET_S_ANNOUNCE) {
-		netif_notify_peers(vi->dev);
+		netdev_notify_peers(vi->dev);
 		virtnet_ack_link_announce(vi);
 	}
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 93e0cfb..ce9d4f2 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3019,6 +3019,7 @@
 	netdev->watchdog_timeo = 5 * HZ;
 
 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
+	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
 		int i;
@@ -3043,7 +3044,6 @@
 		goto err_register;
 	}
 
-	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 	vmxnet3_check_link(adapter, false);
 	atomic_inc(&devices_found);
 	return 0;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 9eb6479..ef36caf 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -774,14 +774,15 @@
 	}
 	/* Global interrupt queue */
 	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+
+	rc = -ENOMEM;
+
 	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
 		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
 	if (!priv->iqcfg)
 		goto err_free_irq_5;
 	writel(priv->iqcfg_dma, ioaddr + IQCFG);
 
-	rc = -ENOMEM;
-
 	/*
 	 * SCC 0-3 private rx/tx irq structures
 	 * IQRX/TXi needs to be set soon. Learned it the hard way...
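The dscc4 hunk moves "rc = -ENOMEM" above the pci_alloc_consistent() call so the jump to err_free_irq_5 returns a real error code rather than whatever rc happened to hold. A generic sketch of that error-path pattern, assuming nothing about the driver beyond the goto-style cleanup:

	/*
	 * Generic sketch: set the error code before the allocation whose
	 * failure jumps to the cleanup label, so the function never
	 * returns a stale rc on failure.  Names are hypothetical.
	 */
	#include <errno.h>
	#include <stdlib.h>

	struct ctx { void *iqcfg; };

	static int setup(struct ctx *c)
	{
		int rc;

		rc = -ENOMEM;			/* set *before* the attempt */
		c->iqcfg = malloc(4096);	/* stand-in for the DMA alloc */
		if (!c->iqcfg)
			goto err;
		/* ... further setup would go here ... */
		return 0;
	err:
		return rc;			/* guaranteed meaningful */
	}

	int main(void)
	{
		struct ctx c;

		if (setup(&c))
			return 1;
		free(c.iqcfg);
		return 0;
	}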
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 0254261..9c34d2f 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@
 	struct sk_buff *skb;
 	const struct i2400m_tlv_detailed_device_info *ddi;
 	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-	const unsigned char zeromac[ETH_ALEN] = { 0 };
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@
 		 "to that of boot mode's\n");
 	dev_warn(dev, "device reports     %pM\n", ddi->mac_address);
 	dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
-	if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+	if (is_zero_ether_addr(ddi->mac_address))
 		dev_err(dev, "device reports an invalid MAC address, "
 			"not updating\n");
 	else {
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 283237f..def12b3 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -326,8 +326,10 @@
 		unsigned barker;
 
 		options_orig = kstrdup(_options, GFP_KERNEL);
-		if (options_orig == NULL)
+		if (options_orig == NULL) {
+			result = -ENOMEM;
 			goto error_parse;
+		}
 		options = options_orig;
 
 		while ((token = strsep(&options, ",")) != NULL) {
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 689a71c..154a496 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1661,7 +1661,9 @@
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
 {
 	struct adm8211_tx_hdr *txhdr;
 	size_t payload_len, hdrlen;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index efc162e..e361afe 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -342,7 +342,7 @@
 	return ret;
 }
 
-static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state)
+static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
 {
 	int ret;
 
@@ -1726,7 +1726,9 @@
 	ieee80211_wake_queues(priv->hw);
 }
 
-static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw,
+			     struct ieee80211_tx_control *control,
+			     struct sk_buff *skb)
 {
 	struct at76_priv *priv = hw->priv;
 	struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 64a453a..3150def 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1331,7 +1331,6 @@
 	unsigned int		nexttbtt;	/* next beacon time in TU */
 	struct ath5k_txq	*cabq;		/* content after beacon */
 
-	int			power_level;	/* Requested tx power in dBm */
 	bool			assoc;		/* associate state */
 	bool			enable_beacon;	/* true if beacons are on */
 
@@ -1425,6 +1424,7 @@
 		/* Value in dB units */
 		s16		txp_cck_ofdm_pwr_delta;
 		bool		txp_setup;
+		int		txp_requested;	/* Requested tx power in dBm */
 	} ah_txpower;
 
 	struct ath5k_nfcal_hist ah_nfcal_hist;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8c4c040..a0a202d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -723,7 +723,7 @@
 	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
 		ieee80211_get_hdrlen_from_skb(skb), padsize,
 		get_hw_packet_type(skb),
-		(ah->power_level * 2),
+		(ah->ah_txpower.txp_requested * 2),
 		hw_rate,
 		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
 		cts_rate, duration);
@@ -1778,7 +1778,8 @@
 	ds->ds_data = bf->skbaddr;
 	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
 			ieee80211_get_hdrlen_from_skb(skb), padsize,
-			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+			AR5K_PKT_TYPE_BEACON,
+			(ah->ah_txpower.txp_requested * 2),
 			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
 			1, AR5K_TXKEYIX_INVALID,
 			antenna, flags, 0, 0);
@@ -2056,9 +2057,7 @@
 void
 ath5k_beacon_config(struct ath5k_hw *ah)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ah->block, flags);
+	spin_lock_bh(&ah->block);
 	ah->bmisscount = 0;
 	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
 
@@ -2085,7 +2084,7 @@
 
 	ath5k_hw_set_imr(ah, ah->imask);
 	mmiowb();
-	spin_unlock_irqrestore(&ah->block, flags);
+	spin_unlock_bh(&ah->block);
 }
 
 static void ath5k_tasklet_beacon(unsigned long data)
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 260e7dc..df61a09 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -55,7 +55,8 @@
 \********************/
 
 static void
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	 struct sk_buff *skb)
 {
 	struct ath5k_hw *ah = hw->priv;
 	u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@
 	}
 
 	if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
-	(ah->power_level != conf->power_level)) {
-		ah->power_level = conf->power_level;
+	(ah->ah_txpower.txp_requested != conf->power_level)) {
+		ah->ah_txpower.txp_requested = conf->power_level;
 
 		/* Half dB steps */
 		ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -254,7 +255,6 @@
 	struct ath5k_vif *avf = (void *)vif->drv_priv;
 	struct ath5k_hw *ah = hw->priv;
 	struct ath_common *common = ath5k_hw_common(ah);
-	unsigned long flags;
 
 	mutex_lock(&ah->lock);
 
@@ -300,9 +300,9 @@
 	}
 
 	if (changes & BSS_CHANGED_BEACON) {
-		spin_lock_irqsave(&ah->block, flags);
+		spin_lock_bh(&ah->block);
 		ath5k_beacon_update(hw, vif);
-		spin_unlock_irqrestore(&ah->block, flags);
+		spin_unlock_bh(&ah->block);
 	}
 
 	if (changes & BSS_CHANGED_BEACON_ENABLED)
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 8b71a2d..01c90ed 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3516,6 +3516,7 @@
 {
 	unsigned int i;
 	u16 *rates;
+	s16 rate_idx_scaled = 0;
 
 	/* max_pwr is power level we got from driver/user in 0.5dB
 	 * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3563,32 @@
 		for (i = 8; i <= 15; i++)
 			rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
 
+	/* Save min/max and current tx power for this channel
+	 * in 0.25dB units.
+	 *
+	 * Note: We use rates[0] for current tx power because
+	 * it covers most of the rates, in most cases. It's our
+	 * tx power limit and what the user expects to see. */
+	ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+	ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+	/* Set max txpower for correct OFDM operation on all rates
+	 * -that is the txpower for 54Mbit-, it's used for the PAPD
+	 * gain probe and it's in 0.5dB units */
+	ah->ah_txpower.txp_ofdm = rates[7];
+
 	/* Now that we have all rates setup use table offset to
 	 * match the power range set by user with the power indices
 	 * on PCDAC/PDADC table */
 	for (i = 0; i < 16; i++) {
-		rates[i] += ah->ah_txpower.txp_offset;
+		rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
 		/* Don't get out of bounds */
-		if (rates[i] > 63)
-			rates[i] = 63;
+		if (rate_idx_scaled > 63)
+			rate_idx_scaled = 63;
+		if (rate_idx_scaled < 0)
+			rate_idx_scaled = 0;
+		rates[i] = rate_idx_scaled;
 	}
-
-	/* Min/max in 0.25dB units */
-	ah->ah_txpower.txp_min_pwr = 2 * rates[7];
-	ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
-	ah->ah_txpower.txp_ofdm = rates[7];
 }
 
 
@@ -3639,10 +3652,17 @@
 	if (!ah->ah_txpower.txp_setup ||
 	    (channel->hw_value != curr_channel->hw_value) ||
 	    (channel->center_freq != curr_channel->center_freq)) {
-		/* Reset TX power values */
+		/* Reset TX power values but preserve requested
+		 * tx power from above */
+		int requested_txpower = ah->ah_txpower.txp_requested;
+
 		memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+		/* Restore TPC setting and requested tx power */
 		ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
 
+		ah->ah_txpower.txp_requested = requested_txpower;
+
 		/* Calculate the powertable */
 		ret = ath5k_setup_channel_powertable(ah, channel,
 							ee_mode, type);
@@ -3789,8 +3809,9 @@
 	 * RF buffer settings on 5211/5212+ so that we
 	 * properly set curve indices.
 	 */
-	ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
-			ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+	ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+					ah->ah_txpower.txp_requested * 2 :
+					AR5K_TUNE_MAX_TXPOWER);
 	if (ret)
 		return ret;
 
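The ath5k phy.c changes record txp_min_pwr/txp_cur_pwr/txp_ofdm before the table offset is applied, clamp each offset index into the valid PCDAC/PDADC range [0, 63] at both ends (the old code only capped the top), and stash txp_requested across the powertable memset so the user-requested power survives a channel change. A tiny sketch of the clamp step in isolation; the offset and rate values are illustrative only:

	/*
	 * Sketch of the index clamp introduced above: after adding the
	 * table offset, each per-rate index is clamped to [0, 63].
	 */
	#include <stdio.h>

	static unsigned short clamp_idx(int idx)
	{
		if (idx > 63)
			return 63;
		if (idx < 0)
			return 0;
		return (unsigned short)idx;
	}

	int main(void)
	{
		unsigned short rates[4] = { 10, 60, 5, 2 };
		int offset = 8;		/* illustrative txp_offset */

		for (int i = 0; i < 4; i++)
			rates[i] = clamp_idx((int)rates[i] + offset);
		for (int i = 0; i < 4; i++)
			printf("%u ", (unsigned int)rates[i]);
		printf("\n");
		return 0;
	}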
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 2588848..c37fe96 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4901,90 +4901,79 @@
 				i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
 				chan->channel);
 
-				/*
-				 * compare test group from regulatory
-				 * channel list with test mode from pCtlMode
-				 * list
-				 */
-				if ((((cfgCtl & ~CTL_MODE_M) |
-				       (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-					ctlIndex[i]) ||
-				    (((cfgCtl & ~CTL_MODE_M) |
-				       (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-				     ((ctlIndex[i] & CTL_MODE_M) |
-				       SD_NO_CTL))) {
-					twiceMinEdgePower =
-					  ar9003_hw_get_max_edge_power(pEepData,
-								       freq, i,
-								       is2ghz);
+			/*
+			 * compare test group from regulatory
+			 * channel list with test mode from pCtlMode
+			 * list
+			 */
+			if ((((cfgCtl & ~CTL_MODE_M) |
+			       (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+				ctlIndex[i]) ||
+			    (((cfgCtl & ~CTL_MODE_M) |
+			       (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+			     ((ctlIndex[i] & CTL_MODE_M) |
+			       SD_NO_CTL))) {
+				twiceMinEdgePower =
+				  ar9003_hw_get_max_edge_power(pEepData,
+							       freq, i,
+							       is2ghz);
 
-					if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
-						/*
-						 * Find the minimum of all CTL
-						 * edge powers that apply to
-						 * this channel
-						 */
-						twiceMaxEdgePower =
-							min(twiceMaxEdgePower,
-							    twiceMinEdgePower);
-						else {
-							/* specific */
-							twiceMaxEdgePower =
-							  twiceMinEdgePower;
-							break;
-						}
+				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+					/*
+					 * Find the minimum of all CTL
+					 * edge powers that apply to
+					 * this channel
+					 */
+					twiceMaxEdgePower =
+						min(twiceMaxEdgePower,
+						    twiceMinEdgePower);
+				else {
+					/* specific */
+					twiceMaxEdgePower = twiceMinEdgePower;
+					break;
 				}
 			}
+		}
 
-			minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+		minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
-			ath_dbg(common, REGULATORY,
-				"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
-				ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
-				scaledPower, minCtlPower);
+		ath_dbg(common, REGULATORY,
+			"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+			ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+			scaledPower, minCtlPower);
 
-			/* Apply ctl mode to correct target power set */
-			switch (pCtlMode[ctlMode]) {
-			case CTL_11B:
-				for (i = ALL_TARGET_LEGACY_1L_5L;
-				     i <= ALL_TARGET_LEGACY_11S; i++)
-					pPwrArray[i] =
-					  (u8)min((u16)pPwrArray[i],
-						  minCtlPower);
-				break;
-			case CTL_11A:
-			case CTL_11G:
-				for (i = ALL_TARGET_LEGACY_6_24;
-				     i <= ALL_TARGET_LEGACY_54; i++)
-					pPwrArray[i] =
-					  (u8)min((u16)pPwrArray[i],
-						  minCtlPower);
-				break;
-			case CTL_5GHT20:
-			case CTL_2GHT20:
-				for (i = ALL_TARGET_HT20_0_8_16;
-				     i <= ALL_TARGET_HT20_21; i++)
-					pPwrArray[i] =
-					  (u8)min((u16)pPwrArray[i],
-						  minCtlPower);
-				pPwrArray[ALL_TARGET_HT20_22] =
-				  (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
-					  minCtlPower);
-				pPwrArray[ALL_TARGET_HT20_23] =
-				  (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
-					   minCtlPower);
-				break;
-			case CTL_5GHT40:
-			case CTL_2GHT40:
-				for (i = ALL_TARGET_HT40_0_8_16;
-				     i <= ALL_TARGET_HT40_23; i++)
-					pPwrArray[i] =
-					  (u8)min((u16)pPwrArray[i],
-						  minCtlPower);
-				break;
-			default:
-			    break;
-			}
+		/* Apply ctl mode to correct target power set */
+		switch (pCtlMode[ctlMode]) {
+		case CTL_11B:
+			for (i = ALL_TARGET_LEGACY_1L_5L;
+			     i <= ALL_TARGET_LEGACY_11S; i++)
+				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+						       minCtlPower);
+			break;
+		case CTL_11A:
+		case CTL_11G:
+			for (i = ALL_TARGET_LEGACY_6_24;
+			     i <= ALL_TARGET_LEGACY_54; i++)
+				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+						       minCtlPower);
+			break;
+		case CTL_5GHT20:
+		case CTL_2GHT20:
+			for (i = ALL_TARGET_HT20_0_8_16;
+			     i <= ALL_TARGET_HT20_23; i++)
+				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+						       minCtlPower);
+			break;
+		case CTL_5GHT40:
+		case CTL_2GHT40:
+			for (i = ALL_TARGET_HT40_0_8_16;
+			     i <= ALL_TARGET_HT40_23; i++)
+				pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+						       minCtlPower);
+			break;
+		default:
+			break;
+		}
 	} /* end ctl mode checking */
 }
 
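Besides re-indenting the block one level out, the ar9003_eeprom hunk folds the HT20 rate-22/23 entries into the main capping loop, so every entry of the affected target-power set is simply capped with min(entry, minCtlPower). A generic sketch of that capping step over a slice of a power array (names and values hypothetical):

	/*
	 * Sketch of "apply the CTL cap to a range of target powers":
	 * each entry in [first, last] becomes min(entry, cap).
	 */
	#include <stdio.h>

	static void cap_range(unsigned char *pwr, int first, int last,
			      unsigned char cap)
	{
		for (int i = first; i <= last; i++)
			if (pwr[i] > cap)
				pwr[i] = cap;
	}

	int main(void)
	{
		unsigned char pwr[8] = { 30, 28, 26, 24, 22, 20, 18, 16 };

		cap_range(pwr, 0, 7, 23);	/* e.g. a 11.5 dBm CTL cap */
		for (int i = 0; i < 8; i++)
			printf("%u ", (unsigned int)pwr[i]);
		printf("\n");
		return 0;
	}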
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b09285c..7373e4b9 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -280,6 +280,7 @@
 	struct ath_txq *txq;
 	struct ath_node *an;
 	u8 paprd;
+	struct ieee80211_sta *sta;
 };
 
 #define ATH_TX_ERROR        0x01
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 936e920..b30596f 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -542,6 +542,7 @@
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+		       struct ieee80211_sta *sta,
 		       struct sk_buff *skb, u8 slot, bool is_cab);
 void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
 bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 77d541f..f42d2eb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -326,7 +326,7 @@
 			goto next;
 		}
 
-		ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
+		ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
 		if (ret != 0) {
 			ath9k_htc_tx_clear_slot(priv, tx_slot);
 			dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c785129..c32f6e3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -856,7 +856,9 @@
 /* mac80211 Callbacks */
 /**********************/
 
-static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+			 struct ieee80211_tx_control *control,
+			 struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 	struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +885,7 @@
 		goto fail_tx;
 	}
 
-	ret = ath9k_htc_tx_start(priv, skb, slot, false);
+	ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
 	if (ret != 0) {
 		ath_dbg(common, XMIT, "Tx failed\n");
 		goto clear_slot;
@@ -1331,6 +1333,34 @@
 	return ret;
 }
 
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_sta *sta, u32 changed)
+{
+	struct ath9k_htc_priv *priv = hw->priv;
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_target_rate trate;
+
+	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
+
+	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+		memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+		ath9k_htc_setup_rate(priv, sta, &trate);
+		if (!ath9k_htc_send_rate_cmd(priv, &trate))
+			ath_dbg(common, CONFIG,
+				"Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+				sta->addr, be32_to_cpu(trate.capflags));
+		else
+			ath_dbg(common, CONFIG,
+				"Unable to update supported rates for sta: %pM\n",
+				sta->addr);
+	}
+
+	ath9k_htc_ps_restore(priv);
+	mutex_unlock(&priv->mutex);
+}
+
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif, u16 queue,
 			     const struct ieee80211_tx_queue_params *params)
@@ -1758,6 +1788,7 @@
 	.sta_add            = ath9k_htc_sta_add,
 	.sta_remove         = ath9k_htc_sta_remove,
 	.conf_tx            = ath9k_htc_conf_tx,
+	.sta_rc_update      = ath9k_htc_sta_rc_update,
 	.bss_info_changed   = ath9k_htc_bss_info_changed,
 	.set_key            = ath9k_htc_set_key,
 	.get_tsf            = ath9k_htc_get_tsf,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 47e61d0..06cdcb7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -333,12 +333,12 @@
 }
 
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+		       struct ieee80211_sta *sta,
 		       struct sk_buff *skb,
 		       u8 slot, bool is_cab)
 {
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = tx_info->control.sta;
 	struct ieee80211_vif *vif = tx_info->control.vif;
 	struct ath9k_htc_sta *ista;
 	struct ath9k_htc_vif *avp = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab..60b6a9d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@
 	case AR9300_DEVID_QCA955X:
 	case AR9300_DEVID_AR9580:
 	case AR9300_DEVID_AR9462:
+	case AR9485_DEVID_AR1111:
 		break;
 	default:
 		if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146..ce7332c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
 #define AR9300_DEVID_AR9462	0x0034
 #define AR9300_DEVID_AR9330	0x0035
 #define AR9300_DEVID_QCA955X	0x0038
+#define AR9485_DEVID_AR1111	0x0037
 
 #define AR5416_AR9100_DEVID	0x000b
 
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 7990cd5..b42be91 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -773,15 +773,10 @@
 }
 EXPORT_SYMBOL(ath9k_hw_intrpend);
 
-void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+void ath9k_hw_kill_interrupts(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (!(ah->imask & ATH9K_INT_GLOBAL))
-		atomic_set(&ah->intr_ref_cnt, -1);
-	else
-		atomic_dec(&ah->intr_ref_cnt);
-
 	ath_dbg(common, INTERRUPT, "disable IER\n");
 	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
 	(void) REG_READ(ah, AR_IER);
@@ -793,6 +788,17 @@
 		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
 	}
 }
+EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
+
+void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+{
+	if (!(ah->imask & ATH9K_INT_GLOBAL))
+		atomic_set(&ah->intr_ref_cnt, -1);
+	else
+		atomic_dec(&ah->intr_ref_cnt);
+
+	ath9k_hw_kill_interrupts(ah);
+}
 EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
 
 void ath9k_hw_enable_interrupts(struct ath_hw *ah)
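The mac.c hunk splits ath9k_hw_disable_interrupts(): the raw register writes move into the new ath9k_hw_kill_interrupts(), which the interrupt handler can call when SC_OP_HW_RESET is set, while disable_interrupts() keeps the intr_ref_cnt bookkeeping and then calls the kill helper. A minimal sketch of that split, with a plain counter and a stub standing in for the atomic refcount and the register writes (this simplifies the ATH9K_INT_GLOBAL special case away):

	/*
	 * Sketch of splitting "disable" into refcount bookkeeping plus a
	 * raw hardware kill.  hw_write_disable() and the counter are
	 * stand-ins, not the driver's API.
	 */
	#include <stdio.h>

	static int intr_ref_cnt;

	static void hw_write_disable(void)
	{
		/* stand-in for the IER / sync-enable register writes */
		printf("interrupts masked in hardware\n");
	}

	static void kill_interrupts(void)
	{
		hw_write_disable();	/* no refcount side effects */
	}

	static void disable_interrupts(void)
	{
		intr_ref_cnt--;		/* bookkeeping first ... */
		kill_interrupts();	/* ... then the raw kill */
	}

	int main(void)
	{
		kill_interrupts();	/* what the ISR does during a reset */
		disable_interrupts();	/* what the normal path does */
		printf("ref_cnt=%d\n", intr_ref_cnt);
		return 0;
	}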
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0eba36d..4a745e6 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -738,6 +738,7 @@
 void ath9k_hw_set_interrupts(struct ath_hw *ah);
 void ath9k_hw_enable_interrupts(struct ath_hw *ah);
 void ath9k_hw_disable_interrupts(struct ath_hw *ah);
+void ath9k_hw_kill_interrupts(struct ath_hw *ah);
 
 void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6049d8b..8a2b04d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -462,8 +462,10 @@
 	if (!ath9k_hw_intrpend(ah))
 		return IRQ_NONE;
 
-	if(test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+		ath9k_hw_kill_interrupts(ah);
 		return IRQ_HANDLED;
+	}
 
 	/*
 	 * Figure out the reason(s) for the interrupt.  Note
@@ -694,7 +696,9 @@
 	return r;
 }
 
-static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw,
+		     struct ieee80211_tx_control *control,
+		     struct sk_buff *skb)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -754,6 +758,7 @@
 
 	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+	txctl.sta = control->sta;
 
 	ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
 
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d5..a978984 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@
 	{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E  AR9485 */
 	{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
 	{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
+	{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
 	{ 0 }
 };
 
@@ -320,6 +321,7 @@
 	 * Otherwise the chip never moved to full sleep,
 	 * when no interface is up.
 	 */
+	ath9k_stop_btcoex(sc);
 	ath9k_hw_disable(sc->sc_ah);
 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e034add..4b12c347 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -25,141 +25,141 @@
 	8, /* MCS start */
 	{
 		[0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
-			5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
+			5400, 0, 12 }, /* 6 Mb */
 		[1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
-			7800,  1, 18, 0, 1, 1, 1 }, /* 9 Mb */
+			7800,  1, 18 }, /* 9 Mb */
 		[2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-			10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
+			10000, 2, 24 }, /* 12 Mb */
 		[3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-			13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
+			13900, 3, 36 }, /* 18 Mb */
 		[4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-			17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
+			17300, 4, 48 }, /* 24 Mb */
 		[5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-			23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
+			23000, 5, 72 }, /* 36 Mb */
 		[6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-			27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
+			27400, 6, 96 }, /* 48 Mb */
 		[7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-			29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
+			29300, 7, 108 }, /* 54 Mb */
 		[8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
-			6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
+			6400, 0, 0 }, /* 6.5 Mb */
 		[9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-			12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
+			12700, 1, 1 }, /* 13 Mb */
 		[10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-			18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
+			18800, 2, 2 }, /* 19.5 Mb */
 		[11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-			25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
+			25000, 3, 3 }, /* 26 Mb */
 		[12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-			36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
+			36700, 4, 4 }, /* 39 Mb */
 		[13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-			48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
+			48100, 5, 5 }, /* 52 Mb */
 		[14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-			53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
+			53500, 6, 6 }, /* 58.5 Mb */
 		[15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-			59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
+			59000, 7, 7 }, /* 65 Mb */
 		[16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-			65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
+			65400, 7, 7 }, /* 75 Mb */
 		[17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-			12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
+			12700, 8, 8 }, /* 13 Mb */
 		[18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-			24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
+			24800, 9, 9 }, /* 26 Mb */
 		[19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-			36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
+			36600, 10, 10 }, /* 39 Mb */
 		[20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-			48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
+			48100, 11, 11 }, /* 52 Mb */
 		[21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-			69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
+			69500, 12, 12 }, /* 78 Mb */
 		[22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-			89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
+			89500, 13, 13 }, /* 104 Mb */
 		[23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-			98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
+			98900, 14, 14 }, /* 117 Mb */
 		[24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-			108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
+			108300, 15, 15 }, /* 130 Mb */
 		[25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-			120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
+			120000, 15, 15 }, /* 144.4 Mb */
 		[26] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-			17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
+			17400, 16, 16 }, /* 19.5 Mb */
 		[27] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-			35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
+			35100, 17, 17 }, /* 39 Mb */
 		[28] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-			52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
+			52600, 18, 18 }, /* 58.5 Mb */
 		[29] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-			70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
+			70400, 19, 19 }, /* 78 Mb */
 		[30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-			104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
+			104900, 20, 20 }, /* 117 Mb */
 		[31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-			115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
+			115800, 20, 20 }, /* 130 Mb*/
 		[32] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-			137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
+			137200, 21, 21 }, /* 156 Mb */
 		[33] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-			151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
+			151100, 21, 21 }, /* 173.3 Mb */
 		[34] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-			152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
+			152800, 22, 22 }, /* 175.5 Mb */
 		[35] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-			168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
+			168400, 22, 22 }, /* 195 Mb*/
 		[36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-			168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
+			168400, 23, 23 }, /* 195 Mb */
 		[37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-			185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
+			185000, 23, 23 }, /* 216.7 Mb */
 		[38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-			13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
+			13200, 0, 0 }, /* 13.5 Mb*/
 		[39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-			25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
+			25900, 1, 1 }, /* 27.0 Mb*/
 		[40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-			38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
+			38600, 2, 2 }, /* 40.5 Mb*/
 		[41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-			49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
+			49800, 3, 3 }, /* 54 Mb */
 		[42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-			72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
+			72200, 4, 4 }, /* 81 Mb */
 		[43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
-			92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
+			92900, 5, 5 }, /* 108 Mb */
 		[44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-			102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
+			102700, 6, 6 }, /* 121.5 Mb*/
 		[45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-			112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
+			112000, 7, 7 }, /* 135 Mb */
 		[46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-			122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
+			122000, 7, 7 }, /* 150 Mb */
 		[47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-			25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
+			25800, 8, 8 }, /* 27 Mb */
 		[48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-			49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
+			49800, 9, 9 }, /* 54 Mb */
 		[49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-			71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
+			71900, 10, 10 }, /* 81 Mb */
 		[50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-			92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
+			92500, 11, 11 }, /* 108 Mb */
 		[51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-			130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
+			130300, 12, 12 }, /* 162 Mb */
 		[52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-			162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
+			162800, 13, 13 }, /* 216 Mb */
 		[53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-			178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
+			178200, 14, 14 }, /* 243 Mb */
 		[54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-			192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
+			192100, 15, 15 }, /* 270 Mb */
 		[55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-			207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
+			207000, 15, 15 }, /* 300 Mb */
 		[56] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-			36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
+			36100, 16, 16 }, /* 40.5 Mb */
 		[57] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-			72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
+			72900, 17, 17 }, /* 81 Mb */
 		[58] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-			108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
+			108300, 18, 18 }, /* 121.5 Mb */
 		[59] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-			142000, 19, 19, 4, 59, 59, 59 }, /*  162 Mb */
+			142000, 19, 19 }, /*  162 Mb */
 		[60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-			205100, 20, 20, 4, 60, 61, 61 }, /*  243 Mb */
+			205100, 20, 20 }, /*  243 Mb */
 		[61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-			224700, 20, 20, 4, 60, 61, 61 }, /*  270 Mb */
+			224700, 20, 20 }, /*  270 Mb */
 		[62] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-			263100, 21, 21, 4, 62, 63, 63 }, /*  324 Mb */
+			263100, 21, 21 }, /*  324 Mb */
 		[63] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-			288000, 21, 21, 4, 62, 63, 63 }, /*  360 Mb */
+			288000, 21, 21 }, /*  360 Mb */
 		[64] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-			290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
+			290700, 22, 22 }, /* 364.5 Mb */
 		[65] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-			317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
+			317200, 22, 22 }, /* 405 Mb */
 		[66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-			317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
+			317200, 23, 23 }, /* 405 Mb */
 		[67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-			346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
+			346400, 23, 23 }, /* 450 Mb */
 	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -173,149 +173,149 @@
 	12, /* MCS start */
 	{
 		[0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
-			900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
+			900, 0, 2 }, /* 1 Mb */
 		[1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
-			1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
+			1900, 1, 4 }, /* 2 Mb */
 		[2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
-			4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
+			4900, 2, 11 }, /* 5.5 Mb */
 		[3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
-			8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
+			8100, 3, 22 }, /* 11 Mb */
 		[4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
-			5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
+			5400, 4, 12 }, /* 6 Mb */
 		[5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
-			7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
+			7800, 5, 18 }, /* 9 Mb */
 		[6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-			10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
+			10100, 6, 24 }, /* 12 Mb */
 		[7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-			14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
+			14100, 7, 36 }, /* 18 Mb */
 		[8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-			17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
+			17700, 8, 48 }, /* 24 Mb */
 		[9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-			23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
+			23700, 9, 72 }, /* 36 Mb */
 		[10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-			27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
+			27400, 10, 96 }, /* 48 Mb */
 		[11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-			30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
+			30900, 11, 108 }, /* 54 Mb */
 		[12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
-			6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
+			6400, 0, 0 }, /* 6.5 Mb */
 		[13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-			12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
+			12700, 1, 1 }, /* 13 Mb */
 		[14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-			18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
+			18800, 2, 2 }, /* 19.5 Mb*/
 		[15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-			25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
+			25000, 3, 3 }, /* 26 Mb */
 		[16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-			36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
+			36700, 4, 4 }, /* 39 Mb */
 		[17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-			48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
+			48100, 5, 5 }, /* 52 Mb */
 		[18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-			53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
+			53500, 6, 6 }, /* 58.5 Mb */
 		[19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-			59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
+			59000, 7, 7 }, /* 65 Mb */
 		[20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-			65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
+			65400, 7, 7 }, /* 65 Mb*/
 		[21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-			12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
+			12700, 8, 8 }, /* 13 Mb */
 		[22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-			24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
+			24800, 9, 9 }, /* 26 Mb */
 		[23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-			36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
+			36600, 10, 10 }, /* 39 Mb */
 		[24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-			48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
+			48100, 11, 11 }, /* 52 Mb */
 		[25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-			69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
+			69500, 12, 12 }, /* 78 Mb */
 		[26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-			89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
+			89500, 13, 13 }, /* 104 Mb */
 		[27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-			98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
+			98900, 14, 14 }, /* 117 Mb */
 		[28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-			108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
+			108300, 15, 15 }, /* 130 Mb */
 		[29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-			120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
+			120000, 15, 15 }, /* 144.4 Mb */
 		[30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-			17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
+			17400, 16, 16 }, /* 19.5 Mb */
 		[31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-			35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
+			35100, 17, 17 }, /* 39 Mb */
 		[32] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-			52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
+			52600, 18, 18 }, /* 58.5 Mb */
 		[33] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-			70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
+			70400, 19, 19 }, /* 78 Mb */
 		[34] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-			104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
+			104900, 20, 20 }, /* 117 Mb */
 		[35] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-			115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
+			115800, 20, 20 }, /* 130 Mb */
 		[36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-			137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
+			137200, 21, 21 }, /* 156 Mb */
 		[37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-			151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
+			151100, 21, 21 }, /* 173.3 Mb */
 		[38] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-			152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
+			152800, 22, 22 }, /* 175.5 Mb */
 		[39] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-			168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
+			168400, 22, 22 }, /* 195 Mb */
 		[40] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-			168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
+			168400, 23, 23 }, /* 195 Mb */
 		[41] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-			185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
+			185000, 23, 23 }, /* 216.7 Mb */
 		[42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-			13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
+			13200, 0, 0 }, /* 13.5 Mb */
 		[43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-			25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
+			25900, 1, 1 }, /* 27.0 Mb */
 		[44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-			38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
+			38600, 2, 2 }, /* 40.5 Mb */
 		[45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-			49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
+			49800, 3, 3 }, /* 54 Mb */
 		[46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-			72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
+			72200, 4, 4 }, /* 81 Mb */
 		[47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
-			92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
+			92900, 5, 5 }, /* 108 Mb */
 		[48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-			102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
+			102700, 6, 6 }, /* 121.5 Mb */
 		[49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-			112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
+			112000, 7, 7 }, /* 135 Mb */
 		[50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-			122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
+			122000, 7, 7 }, /* 150 Mb */
 		[51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-			25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
+			25800, 8, 8 }, /* 27 Mb */
 		[52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-			49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
+			49800, 9, 9 }, /* 54 Mb */
 		[53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-			71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
+			71900, 10, 10 }, /* 81 Mb */
 		[54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-			92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
+			92500, 11, 11 }, /* 108 Mb */
 		[55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-			130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
+			130300, 12, 12 }, /* 162 Mb */
 		[56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-			162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
+			162800, 13, 13 }, /* 216 Mb */
 		[57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-			178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
+			178200, 14, 14 }, /* 243 Mb */
 		[58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-			192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
+			192100, 15, 15 }, /* 270 Mb */
 		[59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-			207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
+			207000, 15, 15 }, /* 300 Mb */
 		[60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-			36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
+			36100, 16, 16 }, /* 40.5 Mb */
 		[61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-			72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
+			72900, 17, 17 }, /* 81 Mb */
 		[62] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-			108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
+			108300, 18, 18 }, /* 121.5 Mb */
 		[63] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-			142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
+			142000, 19, 19 }, /* 162 Mb */
 		[64] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-			205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
+			205100, 20, 20 }, /* 243 Mb */
 		[65] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-			224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
+			224700, 20, 20 }, /* 270 Mb */
 		[66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-			263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
+			263100, 21, 21 }, /* 324 Mb */
 		[67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-			288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
+			288000, 21, 21 }, /* 360 Mb */
 		[68] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-			290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
+			290700, 22, 22 }, /* 364.5 Mb */
 		[69] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-			317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
+			317200, 22, 22 }, /* 405 Mb */
 		[70] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-			317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
+			317200, 23, 23 }, /* 405 Mb */
 		[71] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-			346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
+			346400, 23, 23 }, /* 450 Mb */
 	},
 	50,  /* probe interval */
 	WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -326,21 +326,21 @@
 	0,
 	{
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 0, 12, 0},
+			5400, 0, 12},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800,  1, 18, 0},
+			7800,  1, 18},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10000, 2, 24, 2},
+			10000, 2, 24},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			13900, 3, 36, 2},
+			13900, 3, 36},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17300, 4, 48, 4},
+			17300, 4, 48},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23000, 5, 72, 4},
+			23000, 5, 72},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 6, 96, 4},
+			27400, 6, 96},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			29300, 7, 108, 4},
+			29300, 7, 108},
 	},
 	50,  /* probe interval */
 	0,   /* Phy rates allowed initially */
@@ -351,63 +351,62 @@
 	0,
 	{
 		{ RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-			900, 0, 2, 0},
+			900, 0, 2},
 		{ RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-			1900, 1, 4, 1},
+			1900, 1, 4},
 		{ RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-			4900, 2, 11, 2},
+			4900, 2, 11},
 		{ RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-			8100, 3, 22, 3},
+			8100, 3, 22},
 		{ RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-			5400, 4, 12, 4},
+			5400, 4, 12},
 		{ RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-			7800, 5, 18, 4},
+			7800, 5, 18},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-			10000, 6, 24, 6},
+			10000, 6, 24},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-			13900, 7, 36, 6},
+			13900, 7, 36},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-			17300, 8, 48, 8},
+			17300, 8, 48},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-			23000, 9, 72, 8},
+			23000, 9, 72},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-			27400, 10, 96, 8},
+			27400, 10, 96},
 		{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-			29300, 11, 108, 8},
+			29300, 11, 108},
 	},
 	50,  /* probe interval */
 	0,   /* Phy rates allowed initially */
 };
 
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
 				struct ieee80211_tx_rate *rate)
 {
-	int rix = 0, i = 0;
-	static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+	int rix, i, idx = 0;
 
 	if (!(rate->flags & IEEE80211_TX_RC_MCS))
 		return rate->idx;
 
-	while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
-		rix++; i++;
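+	/* Map the reported MCS index back to an entry in the driver rate table. */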
+	for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
+		idx = ath_rc_priv->valid_rate_index[i];
+
+		if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
+		    rate_table->info[idx].ratecode == rate->idx)
+			break;
 	}
 
-	rix += rate->idx + rate_table->mcs_start;
+	rix = idx;
 
-	if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-	    (rate->flags & IEEE80211_TX_RC_SHORT_GI))
-		rix = rate_table->info[rix].ht_index;
-	else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-		rix = rate_table->info[rix].sgi_index;
-	else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-		rix = rate_table->info[rix].cw40index;
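+	/* Each short-GI entry directly follows its normal-GI counterpart. */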
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rix++;
 
 	return rix;
 }
 
-static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
-				   struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
 	u8 i, j, idx, idx_next;
 
 	for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@
 	}
 }
 
-static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
-{
-	u8 i;
-
-	for (i = 0; i < ath_rc_priv->rate_table_size; i++)
-		ath_rc_priv->valid_rate_index[i] = 0;
-}
-
-static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
-					   u8 index, int valid_tx_rate)
-{
-	BUG_ON(index > ath_rc_priv->rate_table_size);
-	ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
-}
-
 static inline
 int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
 				struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@
 }
 
 static inline int
-ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
-		     struct ath_rate_priv *ath_rc_priv,
+ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
 		     u8 cur_valid_txrate, u8 *next_idx)
 {
 	int8_t i;
@@ -495,10 +478,9 @@
 	return 0;
 }
 
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
-				 const struct ath_rate_table *rate_table,
-				 u32 capflag)
+static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
 	u8 i, hi = 0;
 
 	for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@
 			u32 phy = rate_table->info[i].phy;
 			u8 valid_rate_count = 0;
 
-			if (!ath_rc_valid_phyrate(phy, capflag, 0))
+			if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
 				continue;
 
 			valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
 
 			ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
 			ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-			ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
+			ath_rc_priv->valid_rate_index[i] = true;
 			hi = i;
 		}
 	}
@@ -521,76 +503,73 @@
 	return hi;
 }
 
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
-				const struct ath_rate_table *rate_table,
-				struct ath_rateset *rateset,
-				u32 capflag)
+static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
+				       u32 phy, u32 capflag)
 {
-	u8 i, j, hi = 0;
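+	/* Accept only non-HT rates that match the negotiated rate and capabilities. */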
+	if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
+		return false;
 
-	/* Use intersection of working rates and valid rates */
-	for (i = 0; i < rateset->rs_nrates; i++) {
-		for (j = 0; j < rate_table->rate_cnt; j++) {
-			u32 phy = rate_table->info[j].phy;
-			u16 rate_flags = rate_table->info[j].rate_flags;
-			u8 rate = rateset->rs_rates[i];
-			u8 dot11rate = rate_table->info[j].dot11rate;
+	if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
+		return false;
 
-			/* We allow a rate only if its valid and the
-			 * capflag matches one of the validity
-			 * (VALID/VALID_20/VALID_40) flags */
+	if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+		return false;
 
-			if ((rate == dot11rate) &&
-			    (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
-			    WLAN_RC_CAP_MODE(capflag) &&
-			    (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
-			    !WLAN_RC_PHY_HT(phy)) {
-				u8 valid_rate_count = 0;
-
-				if (!ath_rc_valid_phyrate(phy, capflag, 0))
-					continue;
-
-				valid_rate_count =
-					ath_rc_priv->valid_phy_ratecnt[phy];
-
-				ath_rc_priv->valid_phy_rateidx[phy]
-					[valid_rate_count] = j;
-				ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-				ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
-				hi = max(hi, j);
-			}
-		}
-	}
-
-	return hi;
+	return true;
 }
 
-static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
-				  const struct ath_rate_table *rate_table,
-				  struct ath_rateset *rateset, u32 capflag)
+static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
+				   u32 phy, u32 capflag)
 {
-	u8 i, j, hi = 0;
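+	/* Accept only HT rates that match the negotiated MCS and HT capabilities. */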
+	if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
+		return false;
 
-	/* Use intersection of working rates and valid rates */
+	if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+		return false;
+
+	if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+		return false;
+
+	return true;
+}
+
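+/*
+ * Mark the intersection of the negotiated rate set (legacy or HT) and the
+ * hardware rate table as valid and return the highest valid index.
+ */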
+static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
+{
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+	struct ath_rateset *rateset;
+	u32 phy, capflag = ath_rc_priv->ht_cap;
+	u16 rate_flags;
+	u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
+
+	if (legacy)
+		rateset = &ath_rc_priv->neg_rates;
+	else
+		rateset = &ath_rc_priv->neg_ht_rates;
+
 	for (i = 0; i < rateset->rs_nrates; i++) {
 		for (j = 0; j < rate_table->rate_cnt; j++) {
-			u32 phy = rate_table->info[j].phy;
-			u16 rate_flags = rate_table->info[j].rate_flags;
-			u8 rate = rateset->rs_rates[i];
-			u8 dot11rate = rate_table->info[j].dot11rate;
+			phy = rate_table->info[j].phy;
+			rate_flags = rate_table->info[j].rate_flags;
+			rate = rateset->rs_rates[i];
+			dot11rate = rate_table->info[j].dot11rate;
 
-			if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
-			    !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
-			    !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+			if (legacy &&
+			    !ath_rc_check_legacy(rate, dot11rate,
+						 rate_flags, phy, capflag))
+				continue;
+
+			if (!legacy &&
+			    !ath_rc_check_ht(rate, dot11rate,
+					     rate_flags, phy, capflag))
 				continue;
 
 			if (!ath_rc_valid_phyrate(phy, capflag, 0))
 				continue;
 
-			ath_rc_priv->valid_phy_rateidx[phy]
-				[ath_rc_priv->valid_phy_ratecnt[phy]] = j;
+			valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
+			ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
 			ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-			ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
+			ath_rc_priv->valid_rate_index[j] = true;
 			hi = max(hi, j);
 		}
 	}
@@ -598,13 +577,10 @@
 	return hi;
 }
 
-/* Finds the highest rate index we can use */
-static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
-			         struct ath_rate_priv *ath_rc_priv,
-				 const struct ath_rate_table *rate_table,
-				 int *is_probing,
-				 bool legacy)
+static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
+				 int *is_probing)
 {
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
 	u32 best_thruput, this_thruput, now_msec;
 	u8 rate, next_rate, best_rate, maxindex, minindex;
 	int8_t index = 0;
@@ -624,8 +600,6 @@
 		u8 per_thres;
 
 		rate = ath_rc_priv->valid_rate_index[index];
-		if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
-			continue;
 		if (rate > ath_rc_priv->rate_max_phy)
 			continue;
 
@@ -707,8 +681,6 @@
 	rate->count = tries;
 	rate->idx = rate_table->info[rix].ratecode;
 
-	if (txrc->short_preamble)
-		rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
 	if (txrc->rts || rtsctsenable)
 		rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 
@@ -726,37 +698,25 @@
 				   const struct ath_rate_table *rate_table,
 				   struct ieee80211_tx_info *tx_info)
 {
-	struct ieee80211_tx_rate *rates = tx_info->control.rates;
-	int i = 0, rix = 0, cix, enable_g_protection = 0;
+	struct ieee80211_bss_conf *bss_conf;
 
-	/* get the cix for the lowest valid rix */
-	for (i = 3; i >= 0; i--) {
-		if (rates[i].count && (rates[i].idx >= 0)) {
-			rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-			break;
-		}
-	}
-	cix = rate_table->info[rix].ctrl_rate;
+	if (!tx_info->control.vif)
+		return;
+	/*
+	 * For legacy frames, mac80211 takes care of CTS protection.
+	 */
+	if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+		return;
 
-	/* All protection frames are transmited at 2Mb/s for 802.11g,
-	 * otherwise we transmit them at 1Mb/s */
-	if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
-	    !conf_is_ht(&sc->hw->conf))
-		enable_g_protection = 1;
+	bss_conf = &tx_info->control.vif->bss_conf;
+
+	if (!bss_conf->basic_rates)
+		return;
 
 	/*
-	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
-	 * just CTS.  Note that this is only done for OFDM/HT unicast frames.
+	 * For now, use the lowest allowed basic rate for HT frames.
 	 */
-	if ((tx_info->control.vif &&
-	     tx_info->control.vif->bss_conf.use_cts_prot) &&
-	    (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
-	     WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
-		rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
-		cix = rate_table->info[enable_g_protection].ctrl_rate;
-	}
-
-	tx_info->control.rts_cts_rate_idx = cix;
+	tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
 }
 
 static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@
 	try_per_rate = 4;
 
 	rate_table = ath_rc_priv->rate_table;
-	rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-				     &is_probe, false);
+	rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
 
-	/*
-	 * If we're in HT mode and both us and our peer supports LDPC.
-	 * We don't need to check our own device's capabilities as our own
-	 * ht capabilities would have already been intersected with our peer's.
-	 */
 	if (conf_is_ht(&sc->hw->conf) &&
 	    (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
 		tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@
 		tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
 
 	if (is_probe) {
-		/* set one try for probe rates. For the
-		 * probes don't enable rts */
+		/*
+		 * Set a single try for probe rates and do not
+		 * enable RTS for the probe attempt.
+		 */
 		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
 				       1, rix, 0);
-
-		/* Get the next tried/allowed rate. No RTS for the next series
-		 * after the probe rate
+		/*
+		 * Get the next tried/allowed rate.
+		 * No RTS for the next series after the probe rate.
 		 */
-		ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+		ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
 		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
 				       try_per_rate, rix, 0);
 
 		tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 	} else {
-		/* Set the chosen rate. No RTS for first series entry. */
+		/*
+		 * Set the chosen rate. No RTS for first series entry.
+		 */
 		ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
 				       try_per_rate, rix, 0);
 	}
 
-	/* Fill in the other rates for multirate retry */
-	for ( ; i < 3; i++) {
+	for ( ; i < 4; i++) {
+		/*
+		 * Use twice the number of tries for the last MRR segment.
+		 */
+		if (i + 1 == 4)
+			try_per_rate = 8;
 
-		ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-		/* All other rates in the series have RTS enabled */
+		ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
+
+		/*
+		 * All other rates in the series have RTS enabled.
+		 */
 		ath_rc_rate_set_series(rate_table, &rates[i], txrc,
 				       try_per_rate, rix, 1);
 	}
 
-	/* Use twice the number of tries for the last MRR segment. */
-	try_per_rate = 8;
-
-	/*
-	 * If the last rate in the rate series is MCS and has
-	 * more than 80% of per thresh, then use a legacy rate
-	 * as last retry to ensure that the frame is tried in both
-	 * MCS and legacy rate.
-	 */
-	ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-	if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
-	    (ath_rc_priv->per[rix] > 45))
-		rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-				&is_probe, true);
-
-	/* All other rates in the series have RTS enabled */
-	ath_rc_rate_set_series(rate_table, &rates[i], txrc,
-			       try_per_rate, rix, 1);
 	/*
 	 * NB: Change rate series to enable aggregation when operating
 	 * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@
 		rates[0].count = ATH_TXMAXTRY;
 	}
 
-	/* Setup RTS/CTS */
 	ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
 }
 
@@ -1046,9 +992,6 @@
 	stats->per = per;
 }
 
-/* Update PER, RSSI and whatever else that the code thinks it is doing.
-   If you can make sense of all this, you really need to go out more. */
-
 static void ath_rc_update_ht(struct ath_softc *sc,
 			     struct ath_rate_priv *ath_rc_priv,
 			     struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@
 	if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
 	    rate_table->info[tx_rate].ratekbps <=
 	    rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
-		ath_rc_get_lower_rix(rate_table, ath_rc_priv,
-				     (u8)tx_rate, &ath_rc_priv->rate_max_phy);
+		ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
+				     &ath_rc_priv->rate_max_phy);
 
 		/* Don't probe for a little while. */
 		ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@
 
 }
 
+static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+	struct ath_rc_stats *stats;
+
+	stats = &rc->rcstats[final_rate];
+	stats->success++;
+}
 
 static void ath_rc_tx_status(struct ath_softc *sc,
 			     struct ath_rate_priv *ath_rc_priv,
-			     struct ieee80211_tx_info *tx_info,
-			     int final_ts_idx, int xretries, int long_retry)
+			     struct sk_buff *skb)
 {
-	const struct ath_rate_table *rate_table;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_rate *rates = tx_info->status.rates;
+	struct ieee80211_tx_rate *rate;
+	int final_ts_idx = 0, xretries = 0, long_retry = 0;
 	u8 flags;
 	u32 i = 0, rix;
 
-	rate_table = ath_rc_priv->rate_table;
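+	/* Find the last rate that was actually tried and its retry count. */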
+	for (i = 0; i < sc->hw->max_rates; i++) {
+		rate = &tx_info->status.rates[i];
+		if (rate->idx < 0 || !rate->count)
+			break;
+
+		final_ts_idx = i;
+		long_retry = rate->count - 1;
+	}
+
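+	/* A frame that was never ACKed counts as an excessive retry. */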
+	if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
+		xretries = 1;
 
 	/*
 	 * If the first rate is not the final index, there
 	 * are intermediate rate failures to be processed.
 	 */
 	if (final_ts_idx != 0) {
-		/* Process intermediate rates that failed.*/
 		for (i = 0; i < final_ts_idx ; i++) {
 			if (rates[i].count != 0 && (rates[i].idx >= 0)) {
 				flags = rates[i].flags;
@@ -1152,32 +1112,24 @@
 				    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
 					return;
 
-				rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+				rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
 				ath_rc_update_ht(sc, ath_rc_priv, tx_info,
-						rix, xretries ? 1 : 2,
-						rates[i].count);
+						 rix, xretries ? 1 : 2,
+						 rates[i].count);
 			}
 		}
-	} else {
-		/*
-		 * Handle the special case of MIMO PS burst, where the second
-		 * aggregate is sent out with only one rate and one try.
-		 * Treating it as an excessive retry penalizes the rate
-		 * inordinately.
-		 */
-		if (rates[0].count == 1 && xretries == 1)
-			xretries = 2;
 	}
 
-	flags = rates[i].flags;
+	flags = rates[final_ts_idx].flags;
 
 	/* If HT40 and we have switched mode from 40 to 20 => don't update */
 	if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
 	    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
 		return;
 
-	rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+	rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
 	ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
+	ath_debug_stat_rc(ath_rc_priv, rix);
 }
 
 static const
@@ -1185,8 +1137,6 @@
 					     enum ieee80211_band band,
 					     bool is_ht)
 {
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
 	switch(band) {
 	case IEEE80211_BAND_2GHZ:
 		if (is_ht)
@@ -1197,34 +1147,25 @@
 			return &ar5416_11na_ratetable;
 		return &ar5416_11a_ratetable;
 	default:
-		ath_dbg(common, CONFIG, "Invalid band\n");
 		return NULL;
 	}
 }
 
 static void ath_rc_init(struct ath_softc *sc,
-			struct ath_rate_priv *ath_rc_priv,
-			struct ieee80211_supported_band *sband,
-			struct ieee80211_sta *sta,
-			const struct ath_rate_table *rate_table)
+			struct ath_rate_priv *ath_rc_priv)
 {
+	const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
 	struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
 	u8 i, j, k, hi = 0, hthi = 0;
 
-	/* Initial rate table size. Will change depending
-	 * on the working rate set */
 	ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
 
-	/* Initialize thresholds according to the global rate table */
 	for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
 		ath_rc_priv->per[i] = 0;
+		ath_rc_priv->valid_rate_index[i] = 0;
 	}
 
-	/* Determine the valid rates */
-	ath_rc_init_valid_rate_idx(ath_rc_priv);
-
 	for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
 		for (j = 0; j < RATE_TABLE_SIZE; j++)
 			ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@
 	}
 
 	if (!rateset->rs_nrates) {
-		/* No working rate, just initialize valid rates */
-		hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
-					    ath_rc_priv->ht_cap);
+		hi = ath_rc_init_validrates(ath_rc_priv);
 	} else {
-		/* Use intersection of working rates and valid rates */
-		hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
-					   rateset, ath_rc_priv->ht_cap);
-		if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
-			hthi = ath_rc_setvalid_htrates(ath_rc_priv,
-						       rate_table,
-						       ht_mcs,
-						       ath_rc_priv->ht_cap);
-		}
+		hi = ath_rc_setvalid_rates(ath_rc_priv, true);
+
+		if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
+			hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
+
 		hi = max(hi, hthi);
 	}
 
 	ath_rc_priv->rate_table_size = hi + 1;
 	ath_rc_priv->rate_max_phy = 0;
-	BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+	WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
 
 	for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
 		for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@
 				ath_rc_priv->valid_phy_rateidx[i][j];
 		}
 
-		if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
-		    || !ath_rc_priv->valid_phy_ratecnt[i])
+		if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
+		    !ath_rc_priv->valid_phy_ratecnt[i])
 			continue;
 
 		ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
 	}
-	BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-	BUG_ON(k > RATE_TABLE_SIZE);
+	WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+	WARN_ON(k > RATE_TABLE_SIZE);
 
 	ath_rc_priv->max_valid_rate = k;
-	ath_rc_sort_validrates(rate_table, ath_rc_priv);
+	ath_rc_sort_validrates(ath_rc_priv);
 	ath_rc_priv->rate_max_phy = (k > 4) ?
-					ath_rc_priv->valid_rate_index[k-4] :
-					ath_rc_priv->valid_rate_index[k-1];
-	ath_rc_priv->rate_table = rate_table;
+		ath_rc_priv->valid_rate_index[k-4] :
+		ath_rc_priv->valid_rate_index[k-1];
 
 	ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
 		ath_rc_priv->ht_cap);
 }
 
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
-			       bool is_cw40, bool is_sgi)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
 	u8 caps = 0;
 
@@ -1289,9 +1222,10 @@
 			caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
 		else if (sta->ht_cap.mcs.rx_mask[1])
 			caps |= WLAN_RC_DS_FLAG;
-		if (is_cw40)
+		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
 			caps |= WLAN_RC_40_FLAG;
-		if (is_sgi)
+		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40 ||
+		    sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
 			caps |= WLAN_RC_SGI_FLAG;
 	}
 
@@ -1319,15 +1253,6 @@
 /* mac80211 Rate Control callbacks */
 /***********************************/
 
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-	struct ath_rc_stats *stats;
-
-	stats = &rc->rcstats[final_rate];
-	stats->success++;
-}
-
-
 static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 			  struct ieee80211_sta *sta, void *priv_sta,
 			  struct sk_buff *skb)
@@ -1335,22 +1260,8 @@
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *hdr;
-	int final_ts_idx = 0, tx_status = 0;
-	int long_retry = 0;
-	__le16 fc;
-	int i;
-
-	hdr = (struct ieee80211_hdr *)skb->data;
-	fc = hdr->frame_control;
-	for (i = 0; i < sc->hw->max_rates; i++) {
-		struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
-		if (rate->idx < 0 || !rate->count)
-			break;
-
-		final_ts_idx = i;
-		long_retry = rate->count - 1;
-	}
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	__le16 fc = hdr->frame_control;
 
 	if (!priv_sta || !ieee80211_is_data(fc))
 		return;
@@ -1363,11 +1274,7 @@
 	if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
 		return;
 
-	if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
-		tx_status = 1;
-
-	ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
-			 long_retry);
+	ath_rc_tx_status(sc, ath_rc_priv, skb);
 
 	/* Check if aggregation has to be enabled for this tid */
 	if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1290,14 @@
 				ieee80211_start_tx_ba_session(sta, tid, 0);
 		}
 	}
-
-	ath_debug_stat_rc(ath_rc_priv,
-		ath_rc_get_rateindex(ath_rc_priv->rate_table,
-			&tx_info->status.rates[final_ts_idx]));
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
 	struct ath_softc *sc = priv;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
-	const struct ath_rate_table *rate_table;
-	bool is_cw40, is_sgi = false;
 	int i, j = 0;
 
 	for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1319,15 @@
 		ath_rc_priv->neg_ht_rates.rs_nrates = j;
 	}
 
-	is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+	ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
+							sta->ht_cap.ht_supported);
+	if (!ath_rc_priv->rate_table) {
+		ath_err(common, "No rate table chosen\n");
+		return;
+	}
 
-	if (is_cw40)
-		is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
-	else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-		is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-
-	/* Choose rate table first */
-
-	rate_table = ath_choose_rate_table(sc, sband->band,
-	                      sta->ht_cap.ht_supported);
-
-	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
-	ath_rc_init(sc, priv_sta, sband, sta, rate_table);
+	ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+	ath_rc_init(sc, priv_sta);
 }
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1336,14 @@
 {
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
-	const struct ath_rate_table *rate_table = NULL;
-	bool oper_cw40 = false, oper_sgi;
-	bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
-	bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
-
-	/* FIXME: Handle AP mode later when we support CWM */
 
 	if (changed & IEEE80211_RC_BW_CHANGED) {
-		if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
-			return;
+		ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+		ath_rc_init(sc, priv_sta);
 
-		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-			oper_cw40 = true;
-
-		if (oper_cw40)
-			oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-				   true : false;
-		else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-			oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
-				   true : false;
-		else
-			oper_sgi = false;
-
-		if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
-			rate_table = ath_choose_rate_table(sc, sband->band,
-						   sta->ht_cap.ht_supported);
-			ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
-						   oper_cw40, oper_sgi);
-			ath_rc_init(sc, priv_sta, sband, sta, rate_table);
-
-			ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-				"Operating HT Bandwidth changed to: %d\n",
-				sc->hw->conf.channel_type);
-		}
+		ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
+			"Operating HT Bandwidth changed to: %d\n",
+			sc->hw->conf.channel_type);
 	}
 }
 
@@ -1484,7 +1355,7 @@
 	struct ath_rate_priv *rc = file->private_data;
 	char *buf;
 	unsigned int len = 0, max;
-	int i = 0;
+	int rix;
 	ssize_t retval;
 
 	if (rc->rate_table == NULL)
@@ -1500,7 +1371,8 @@
 		       "HT", "MCS", "Rate",
 		       "Success", "Retries", "XRetries", "PER");
 
-	for (i = 0; i < rc->rate_table_size; i++) {
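+	/* Show statistics only for rates that are valid for this station. */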
+	for (rix = 0; rix < rc->max_valid_rate; rix++) {
+		u8 i = rc->valid_rate_index[rix];
 		u32 ratekbps = rc->rate_table->info[i].ratekbps;
 		struct ath_rc_stats *stats = &rc->rcstats[i];
 		char mcs[5];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 75f8e9b..268e67d 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -160,10 +160,6 @@
 		u32 user_ratekbps;
 		u8 ratecode;
 		u8 dot11rate;
-		u8 ctrl_rate;
-		u8 cw40index;
-		u8 sgi_index;
-		u8 ht_index;
 	} info[RATE_TABLE_SIZE];
 	u32 probe_interval;
 	u8 initial_ratemax;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 12aca02..4480c0c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1044,7 +1044,6 @@
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_hdr *hdr;
 	int retval;
-	bool decrypt_error = false;
 	struct ath_rx_status rs;
 	enum ath9k_rx_qtype qtype;
 	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
@@ -1066,6 +1065,7 @@
 	tsf_lower = tsf & 0xffffffff;
 
 	do {
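+		/* Reset the decryption error flag for each received frame. */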
+		bool decrypt_error = false;
 		/* If handling rx interrupt and flush is in progress => exit */
 		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
 			break;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2c9da6b..ef91f6c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1773,11 +1773,12 @@
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+static void setup_frame_info(struct ieee80211_hw *hw,
+			     struct ieee80211_sta *sta,
+			     struct sk_buff *skb,
 			     int framelen)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = tx_info->control.sta;
 	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	const struct ieee80211_rate *rate;
@@ -1935,7 +1936,7 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
+	struct ieee80211_sta *sta = txctl->sta;
 	struct ieee80211_vif *vif = info->control.vif;
 	struct ath_softc *sc = hw->priv;
 	struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1980,7 @@
 	    !ieee80211_is_data(hdr->frame_control))
 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 
-	setup_frame_info(hw, skb, frmlen);
+	setup_frame_info(hw, sta, skb, frmlen);
 
 	/*
 	 * At this point, the vif, hw_key and sta pointers in the tx control
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 376be11..2aa4a59 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -425,6 +425,7 @@
 	bool rx_has_plcp;
 	struct sk_buff *rx_failover;
 	int rx_failover_missing;
+	u32 ampdu_ref;
 
 	/* FIFO for collecting outstanding BlockAckRequest */
 	struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw,
+		    struct ieee80211_tx_control *control,
+		    struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
 				const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index c5ca6f1..24ac287 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -341,6 +341,7 @@
 		if (SUPP(CARL9170FW_WLANTX_CAB)) {
 			if_comb_types |=
 				BIT(NL80211_IFTYPE_AP) |
+				BIT(NL80211_IFTYPE_MESH_POINT) |
 				BIT(NL80211_IFTYPE_P2P_GO);
 		}
 	}
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 53415bf..f867628 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -318,10 +318,10 @@
 		bssid = common->curbssid;
 
 		switch (vif->type) {
-		case NL80211_IFTYPE_MESH_POINT:
 		case NL80211_IFTYPE_ADHOC:
 			cam_mode |= AR9170_MAC_CAM_IBSS;
 			break;
+		case NL80211_IFTYPE_MESH_POINT:
 		case NL80211_IFTYPE_AP:
 			cam_mode |= AR9170_MAC_CAM_AP;
 
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 858e58d..18554ab 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -616,10 +616,12 @@
 
 			goto unlock;
 
+		case NL80211_IFTYPE_MESH_POINT:
 		case NL80211_IFTYPE_AP:
 			if ((vif->type == NL80211_IFTYPE_STATION) ||
 			    (vif->type == NL80211_IFTYPE_WDS) ||
-			    (vif->type == NL80211_IFTYPE_AP))
+			    (vif->type == NL80211_IFTYPE_AP) ||
+			    (vif->type == NL80211_IFTYPE_MESH_POINT))
 				break;
 
 			err = -EBUSY;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 6f6a341..a0b7230 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -206,6 +206,7 @@
 
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_ADHOC:
+		case NL80211_IFTYPE_MESH_POINT:
 			carl9170_update_beacon(ar, true);
 			break;
 
@@ -623,7 +624,8 @@
 #undef TID_CHECK
 }
 
-static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+				 struct ieee80211_rx_status *rx_status)
 {
 	__le16 fc;
 
@@ -636,6 +638,9 @@
 		return true;
 	}
 
+	rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+	rx_status->ampdu_reference = ar->ampdu_ref;
+
 	/*
 	 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
 	 * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@
 	if (unlikely(len < sizeof(*mac)))
 		goto drop;
 
+	memset(&status, 0, sizeof(status));
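+	/* Cleared up front so A-MPDU details can be filled in below. */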
+
 	mpdu_len = len - sizeof(*mac);
 
 	mac = (void *)(buf + mpdu_len);
 	mac_status = mac->status;
 	switch (mac_status & AR9170_RX_STATUS_MPDU) {
 	case AR9170_RX_STATUS_MPDU_FIRST:
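+		/* A new A-MPDU starts here; use a fresh reference for its subframes. */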
+		ar->ampdu_ref++;
 		/* Aggregated MPDUs start with a PLCP header */
 		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
 			head = (void *) buf;
@@ -720,12 +728,13 @@
 		break;
 
 	case AR9170_RX_STATUS_MPDU_LAST:
+		status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
 		/*
 		 * The last frame of an A-MPDU has an extra tail
 		 * which does contain the phy status of the whole
 		 * aggregate.
 		 */
-
 		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
 			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
 			phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@
 	if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
 		goto drop;
 
-	memset(&status, 0, sizeof(status));
 	if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
 		goto drop;
 
-	if (!carl9170_ampdu_check(ar, buf, mac_status))
+	if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
 		goto drop;
 
 	if (phy)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6a86814..84377cf 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -867,14 +867,15 @@
 	return false;
 }
 
-static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
+static int carl9170_tx_prepare(struct ar9170 *ar,
+			       struct ieee80211_sta *sta,
+			       struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 	struct _carl9170_tx_superframe *txc;
 	struct carl9170_vif_info *cvif;
 	struct ieee80211_tx_info *info;
 	struct ieee80211_tx_rate *txrate;
-	struct ieee80211_sta *sta;
 	struct carl9170_tx_info *arinfo;
 	unsigned int hw_queue;
 	int i;
@@ -910,8 +911,6 @@
 	else
 		cvif = NULL;
 
-	sta = info->control.sta;
-
 	txc = (void *)skb_push(skb, sizeof(*txc));
 	memset(txc, 0, sizeof(*txc));
 
@@ -1457,20 +1456,21 @@
 	return false;
 }
 
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw,
+		    struct ieee80211_tx_control *control,
+		    struct sk_buff *skb)
 {
 	struct ar9170 *ar = hw->priv;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_sta *sta;
+	struct ieee80211_sta *sta = control->sta;
 	bool run;
 
 	if (unlikely(!IS_STARTED(ar)))
 		goto err_free;
 
 	info = IEEE80211_SKB_CB(skb);
-	sta = info->control.sta;
 
-	if (unlikely(carl9170_tx_prepare(ar, skb)))
+	if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
 		goto err_free;
 
 	carl9170_tx_accounting(ar, skb);
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 4648bbf..098fe9e 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -4,6 +4,7 @@
 b43-$(CONFIG_B43_PHY_N)		+= tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)		+= radio_2055.o
 b43-$(CONFIG_B43_PHY_N)		+= radio_2056.o
+b43-$(CONFIG_B43_PHY_N)		+= radio_2057.o
 b43-y				+= phy_common.o
 b43-y				+= phy_g.o
 b43-y				+= phy_a.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7c899fc..b298e5d 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -241,16 +241,18 @@
 #define B43_SHM_SH_PHYVER		0x0050	/* PHY version */
 #define B43_SHM_SH_PHYTYPE		0x0052	/* PHY type */
 #define B43_SHM_SH_ANTSWAP		0x005C	/* Antenna swap threshold */
-#define B43_SHM_SH_HOSTFLO		0x005E	/* Hostflags for ucode options (low) */
-#define B43_SHM_SH_HOSTFMI		0x0060	/* Hostflags for ucode options (middle) */
-#define B43_SHM_SH_HOSTFHI		0x0062	/* Hostflags for ucode options (high) */
+#define B43_SHM_SH_HOSTF1		0x005E	/* Hostflags 1 for ucode options */
+#define B43_SHM_SH_HOSTF2		0x0060	/* Hostflags 2 for ucode options */
+#define B43_SHM_SH_HOSTF3		0x0062	/* Hostflags 3 for ucode options */
 #define B43_SHM_SH_RFATT		0x0064	/* Current radio attenuation value */
 #define B43_SHM_SH_RADAR		0x0066	/* Radar register */
 #define B43_SHM_SH_PHYTXNOI		0x006E	/* PHY noise directly after TX (lower 8bit only) */
 #define B43_SHM_SH_RFRXSP1		0x0072	/* RF RX SP Register 1 */
+#define B43_SHM_SH_HOSTF4		0x0078	/* Hostflags 4 for ucode options */
 #define B43_SHM_SH_CHAN			0x00A0	/* Current channel (low 8bit only) */
 #define  B43_SHM_SH_CHAN_5GHZ		0x0100	/* Bit set, if 5 Ghz channel */
 #define  B43_SHM_SH_CHAN_40MHZ		0x0200	/* Bit set, if 40 Mhz channel width */
+#define B43_SHM_SH_HOSTF5		0x00D4	/* Hostflags 5 for ucode options */
 #define B43_SHM_SH_BCMCFIFOID		0x0108	/* Last posted cookie to the bcast/mcast FIFO */
 /* TSSI information */
 #define B43_SHM_SH_TSSI_CCK		0x0058	/* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@
 #define B43_PHYTYPE_HT			0x07
 #define B43_PHYTYPE_LCN			0x08
 #define B43_PHYTYPE_LCNXN		0x09
+#define B43_PHYTYPE_LCN40		0x0a
+#define B43_PHYTYPE_AC			0x0b
 
 /* PHYRegisters */
 #define B43_PHY_ILT_A_CTRL		0x0072
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b..73730e9 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -533,11 +533,11 @@
 {
 	u64 ret;
 
-	ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
+	ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
 	ret <<= 16;
-	ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
+	ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
 	ret <<= 16;
-	ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
+	ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
 
 	return ret;
 }
@@ -550,9 +550,9 @@
 	lo = (value & 0x00000000FFFFULL);
 	mi = (value & 0x0000FFFF0000ULL) >> 16;
 	hi = (value & 0xFFFF00000000ULL) >> 32;
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
 }
 
 /* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -2719,32 +2719,37 @@
 	if (dev->dev->chip_id == 0x4301) {
 		mask |= 0x0060;
 		set |= 0x0060;
+	} else if (dev->dev->chip_id == 0x5354) {
+		/* Don't allow overtaking buttons GPIOs */
+		/* Don't take over the button GPIOs */
 	}
-	if (dev->dev->chip_id == 0x5354)
-		set &= 0xff02;
+
 	if (0 /* FIXME: conditional unknown */ ) {
 		b43_write16(dev, B43_MMIO_GPIO_MASK,
 			    b43_read16(dev, B43_MMIO_GPIO_MASK)
 			    | 0x0100);
-		mask |= 0x0180;
-		set |= 0x0180;
+		/* BT Coexistence Input */
+		mask |= 0x0080;
+		set |= 0x0080;
+		/* BT Coexistence Out */
+		mask |= 0x0100;
+		set |= 0x0100;
 	}
 	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
+		/* PA is controlled by gpio 9, let ucode handle it */
 		b43_write16(dev, B43_MMIO_GPIO_MASK,
 			    b43_read16(dev, B43_MMIO_GPIO_MASK)
 			    | 0x0200);
 		mask |= 0x0200;
 		set |= 0x0200;
 	}
-	if (dev->dev->core_rev >= 2)
-		mask |= 0x0010;	/* FIXME: This is redundant. */
 
 	switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
 	case B43_BUS_BCMA:
 		bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
 				(bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
-					BCMA_CC_GPIOCTL) & mask) | set);
+					BCMA_CC_GPIOCTL) & ~mask) | set);
 		break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@
 		if (gpiodev)
 			ssb_write32(gpiodev, B43_GPIO_CONTROL,
 				    (ssb_read32(gpiodev, B43_GPIO_CONTROL)
-				    & mask) | set);
+				    & ~mask) | set);
 		break;
 #endif
 	}
@@ -3407,7 +3412,8 @@
 }
 
 static void b43_op_tx(struct ieee80211_hw *hw,
-		     struct sk_buff *skb)
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
 {
 	struct b43_wl *wl = hw_to_b43_wl(hw);
 
@@ -4277,6 +4283,35 @@
 	return err;
 }
 
+static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
+{
+	switch (phy_type) {
+	case B43_PHYTYPE_A:
+		return "A";
+	case B43_PHYTYPE_B:
+		return "B";
+	case B43_PHYTYPE_G:
+		return "G";
+	case B43_PHYTYPE_N:
+		return "N";
+	case B43_PHYTYPE_LP:
+		return "LP";
+	case B43_PHYTYPE_SSLPN:
+		return "SSLPN";
+	case B43_PHYTYPE_HT:
+		return "HT";
+	case B43_PHYTYPE_LCN:
+		return "LCN";
+	case B43_PHYTYPE_LCNXN:
+		return "LCNXN";
+	case B43_PHYTYPE_LCN40:
+		return "LCN40";
+	case B43_PHYTYPE_AC:
+		return "AC";
+	}
+	return "UNKNOWN";
+}
+
 /* Get PHY and RADIO versioning numbers */
 static int b43_phy_versioning(struct b43_wldev *dev)
 {
@@ -4337,13 +4372,13 @@
 		unsupported = 1;
 	}
 	if (unsupported) {
-		b43err(dev->wl, "FOUND UNSUPPORTED PHY "
-		       "(Analog %u, Type %u, Revision %u)\n",
-		       analog_type, phy_type, phy_rev);
+		b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
+		       analog_type, phy_type, b43_phy_name(dev, phy_type),
+		       phy_rev);
 		return -EOPNOTSUPP;
 	}
-	b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
-	       analog_type, phy_type, phy_rev);
+	b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
+		analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
 
 	/* Get RADIO versioning */
 	if (dev->dev->core_rev >= 24) {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3f8883b..f01676a 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -240,6 +240,21 @@
 			  (b43_radio_read16(dev, offset) & mask) | set);
 }
 
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+			  u16 value, int delay, int timeout)
+{
+	u16 val;
+	int i;
+
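+	/* Poll every 'delay' microseconds, for at most 'timeout' microseconds,
+	 * until the masked register read equals 'value' */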
+	for (i = 0; i < timeout; i += delay) {
+		val = b43_radio_read(dev, offset);
+		if ((val & mask) == value)
+			return true;
+		udelay(delay);
+	}
+	return false;
+}
+
 u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
 {
 	assert_mac_suspended(dev);
@@ -428,7 +443,7 @@
 	average = (a + b + c + d + 2) / 4;
 	if (is_ofdm) {
 		/* Adjust for CCK-boost */
-		if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+		if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
 		    & B43_HF_CCKBOOST)
 			average = (average >= 13) ? (average - 13) : 0;
 	}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9233b13..f1b9993 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -365,6 +365,12 @@
 void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
 
 /**
+ * b43_radio_wait_value - Wait for a masked register read to match a value
+ */
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+			  u16 value, int delay, int timeout);
+
+/**
  * b43_radio_lock - Lock firmware radio register access
  */
 void b43_radio_lock(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b92bb9c..3c35382 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -32,6 +32,7 @@
 #include "tables_nphy.h"
 #include "radio_2055.h"
 #include "radio_2056.h"
+#include "radio_2057.h"
 #include "main.h"
 
 struct nphy_txgains {
@@ -126,6 +127,46 @@
 	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
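+/* Set or clear an RF control override on rev7+ N-PHYs. en_addrs[override]
+ * holds the per-core enable registers; the matching value register, mask
+ * and shift come from the rev7 override table (entry may be missing). */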
+static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
+					      u16 value, u8 core, bool off,
+					      u8 override)
+{
+	const struct nphy_rf_control_override_rev7 *e;
+	u16 en_addrs[3][2] = {
+		{ 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
+	};
+	u16 en_addr;
+	u16 en_mask = field;
+	u16 val_addr;
+	u8 i;
+
+	/* Remember: we can get NULL! */
+	e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
+
+	for (i = 0; i < 2; i++) {
+		if (override >= ARRAY_SIZE(en_addrs)) {
+			b43err(dev->wl, "Invalid override value %d\n", override);
+			return;
+		}
+		en_addr = en_addrs[override][i];
+
+		/* Without a table entry only the enable bits can be touched */
+		if (e)
+			val_addr = (i == 0) ? e->val_addr_core0 :
+					      e->val_addr_core1;
+
+		if (off) {
+			b43_phy_mask(dev, en_addr, ~en_mask);
+			if (e)
+				b43_phy_mask(dev, val_addr, ~e->val_mask);
+		} else {
+			if (!core || (core & (1 << i))) {
+				b43_phy_set(dev, en_addr, en_mask);
+				if (e)
+					b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
+			}
+		}
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
 						u16 value, u8 core, bool off)
@@ -459,6 +500,137 @@
 }
 
 /**************************************************
+ * Radio 0x2057
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	u16 tmp;
+
+	if (phy->radio_rev == 5) {
+		b43_phy_mask(dev, 0x342, ~0x2);
+		udelay(10);
+		b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
+		b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+	}
+
+	b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
+	udelay(10);
+	b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
+	if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+		b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+		return 0;
+	}
+	b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
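+	/* The calibration result lives in bits 1-5 of RCAL_STATUS */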
+	tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+	b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
+
+	if (phy->radio_rev == 5) {
+		b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+		b43_radio_mask(dev, 0x1ca, ~0x2);
+	}
+	if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+		b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
+		b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
+				  tmp << 2);
+	}
+
+	return tmp & 0x3e;
+}
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+			phy->radio_rev == 6);
+	u16 tmp;
+
+	if (special) {
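+	/* The RC calibration is run three times with different trim settings;
+	 * only the result of the final pass is returned. */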
+		b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
+	} else {
+		b43_radio_write(dev, 0x1AE, 0x61);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+	}
+	b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+	if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+				  5000000))
+		b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+	if (special) {
+		b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+	} else {
+		b43_radio_write(dev, 0x1AE, 0x69);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
+	}
+	b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+	if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+				  5000000))
+		b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+	if (special) {
+		b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
+		b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+	} else {
+		b43_radio_write(dev, 0x1AE, 0x73);
+		b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
+	}
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+	if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+				  5000000)) {
+		b43err(dev->wl, "Radio 0x2057 rccal timeout\n");
+		return 0;
+	}
+	tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+	b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+	return tmp;
+}
+
+static void b43_radio_2057_init_pre(struct b43_wldev *dev)
+{
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+	/* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_2057_init_post(struct b43_wldev *dev)
+{
+	b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
+
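+	/* Pulse the RFPLL misc-cal reset and XTAL_CONFIG2 bit 0x80 for 2 ms */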
+	b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
+	b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
+	mdelay(2);
+	b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
+	b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
+
+	if (dev->phy.n->init_por) {
+		b43_radio_2057_rcal(dev);
+		b43_radio_2057_rccal(dev);
+	}
+	b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
+
+	dev->phy.n->init_por = false;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+static void b43_radio_2057_init(struct b43_wldev *dev)
+{
+	b43_radio_2057_init_pre(dev);
+	r2057_upload_inittabs(dev);
+	b43_radio_2057_init_post(dev);
+}
+
+/**************************************************
  * Radio 0x2056
  **************************************************/
 
@@ -545,7 +717,9 @@
 	enum ieee80211_band band = b43_current_band(dev->wl);
 	u16 offset;
 	u8 i;
-	u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+	u16 bias, cbias;
+	u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
+	u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
 
 	B43_WARN_ON(dev->phy.rev < 3);
 
@@ -630,7 +804,56 @@
 			b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
 		}
 	} else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
-		/* TODO */
+		u16 freq = dev->phy.channel_freq;
+		if (freq < 5100) {
+			paa_boost = 0xA;
+			pada_boost = 0x77;
+			pgaa_boost = 0xF;
+			mixa_boost = 0xF;
+		} else if (freq < 5340) {
+			paa_boost = 0x8;
+			pada_boost = 0x77;
+			pgaa_boost = 0xFB;
+			mixa_boost = 0xF;
+		} else if (freq < 5650) {
+			paa_boost = 0x0;
+			pada_boost = 0x77;
+			pgaa_boost = 0xB;
+			mixa_boost = 0xF;
+		} else {
+			paa_boost = 0x0;
+			pada_boost = 0x77;
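+			/* PGA boost falls off linearly with frequency here */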
+			if (freq != 5825)
+				pgaa_boost = -(freq - 18) / 36 + 168;
+			else
+				pgaa_boost = 6;
+			mixa_boost = 0xF;
+		}
+
+		for (i = 0; i < 2; i++) {
+			offset = i ? B2056_TX1 : B2056_TX0;
+
+			b43_radio_write(dev,
+				offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
+			b43_radio_write(dev,
+				offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
+			b43_radio_write(dev,
+				offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
+			b43_radio_write(dev,
+				offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
+			b43_radio_write(dev,
+				offset | B2056_TX_TXSPARE1, 0x30);
+			b43_radio_write(dev,
+				offset | B2056_TX_PA_SPARE2, 0xee);
+			b43_radio_write(dev,
+				offset | B2056_TX_PADA_CASCBIAS, 0x03);
+			b43_radio_write(dev,
+				offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+			b43_radio_write(dev,
+				offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+			b43_radio_write(dev,
+				offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+		}
 	}
 
 	udelay(50);
@@ -643,6 +866,37 @@
 	udelay(300);
 }
 
+static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	u16 mast2, tmp;
+
+	if (phy->rev != 3)
+		return 0;
+
+	mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
+	b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
+
+	udelay(10);
+	b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+	udelay(10);
+	b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
+
+	if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
+				  1000000)) {
+		b43err(dev->wl, "Radio recalibration timeout\n");
+		return 0;
+	}
+
+	b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+	tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
+	b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
+
+	b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
+
+	return tmp & 0x1f;
+}
+
 static void b43_radio_init2056_pre(struct b43_wldev *dev)
 {
 	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@
 	b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
 	b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
 	b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-	/*
-	if (nphy->init_por)
-		Call Radio 2056 Recalibrate
-	*/
+	if (dev->phy.n->init_por)
+		b43_radio_2056_rcal(dev);
 }
 
 /*
@@ -680,6 +932,8 @@
 	b43_radio_init2056_pre(dev);
 	b2056_upload_inittabs(dev, 0, 0);
 	b43_radio_init2056_post(dev);
+
+	dev->phy.n->init_por = false;
 }
 
 /**************************************************
@@ -753,8 +1007,6 @@
 {
 	struct b43_phy_n *nphy = dev->phy.n;
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-	int i;
-	u16 val;
 	bool workaround = false;
 
 	if (sprom->revision < 4)
@@ -777,15 +1029,7 @@
 	b43_radio_set(dev, B2055_CAL_MISC, 0x1);
 	msleep(1);
 	b43_radio_set(dev, B2055_CAL_MISC, 0x40);
-	for (i = 0; i < 200; i++) {
-		val = b43_radio_read(dev, B2055_CAL_COUT2);
-		if (val & 0x80) {
-			i = 0;
-			break;
-		}
-		udelay(10);
-	}
-	if (i)
+	if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
 		b43err(dev->wl, "radio post init timeout\n");
 	b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
 	b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
-	if (dev->phy.rev >= 3)
+	if (dev->phy.rev >= 7)
+		; /* TODO */
+	else if (dev->phy.rev >= 3)
 		b43_nphy_gain_ctl_workarounds_rev3plus(dev);
 	else
 		b43_nphy_gain_ctl_workarounds_rev1_2(dev);
 }
 
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
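+/* Return the 3-bit LPF bandwidth control field from N-PHY table 7. With
+ * offset 0 the default entry for the current channel width is used. */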
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+	if (!offset)
+		offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
+	return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
+}
+
+static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
+{
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+	struct b43_phy *phy = &dev->phy;
+
+	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+					0x1F };
+	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+
+	u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+	u8 ntab7_138_146[] = { 0x11, 0x11 };
+	u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
+
+	u16 lpf_20, lpf_40, lpf_11b;
+	u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
+	u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+	bool rccal_ovrd = false;
+
+	u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
+	u16 bias, conv, filt;
+
+	u32 tmp32;
+	u8 core;
+
+	if (phy->rev == 7) {
+		b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
+		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
+	}
+	if (phy->rev <= 8) {
+		b43_phy_write(dev, 0x23F, 0x1B0);
+		b43_phy_write(dev, 0x240, 0x1B0);
+	}
+	if (phy->rev >= 8)
+		b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
+
+	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
+	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+	tmp32 &= 0xffffff;
+	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+
+	if (b43_nphy_ipa(dev))
+		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+
+	b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
+	b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);
+
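+	/* Read back the current LPF bandwidth codes for 20 MHz, 40 MHz and 11b */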
+	lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
+	lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
+	lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+	if (b43_nphy_ipa(dev)) {
+		if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+		    phy->radio_rev == 7 || phy->radio_rev == 8) {
+			bcap_val = b43_radio_read(dev, 0x16b);
+			scap_val = b43_radio_read(dev, 0x16a);
+			scap_val_11b = scap_val;
+			bcap_val_11b = bcap_val;
+			if (phy->radio_rev == 5 && phy->is_40mhz) {
+				scap_val_11n_20 = scap_val;
+				bcap_val_11n_20 = bcap_val;
+				scap_val_11n_40 = bcap_val_11n_40 = 0xc;
+				rccal_ovrd = true;
+			} else { /* Rev 7/8 */
+				lpf_20 = 4;
+				lpf_11b = 1;
+				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+					scap_val_11n_20 = 0xc;
+					bcap_val_11n_20 = 0xc;
+					scap_val_11n_40 = 0xa;
+					bcap_val_11n_40 = 0xa;
+				} else {
+					scap_val_11n_20 = 0x14;
+					bcap_val_11n_20 = 0x14;
+					scap_val_11n_40 = 0xf;
+					bcap_val_11n_40 = 0xf;
+				}
+				rccal_ovrd = true;
+			}
+		}
+	} else {
+		if (phy->radio_rev == 5) {
+			lpf_20 = 1;
+			lpf_40 = 3;
+			bcap_val = b43_radio_read(dev, 0x16b);
+			scap_val = b43_radio_read(dev, 0x16a);
+			scap_val_11b = scap_val;
+			bcap_val_11b = bcap_val;
+			scap_val_11n_20 = 0x11;
+			scap_val_11n_40 = 0x11;
+			bcap_val_11n_20 = 0x13;
+			bcap_val_11n_40 = 0x13;
+			rccal_ovrd = true;
+		}
+	}
+	if (rccal_ovrd) {
+		rx2tx_lut_20_11b = (bcap_val_11b << 8) |
+				   (scap_val_11b << 3) |
+				   lpf_11b;
+		rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
+				   (scap_val_11n_20 << 3) |
+				   lpf_20;
+		rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
+				   (scap_val_11n_40 << 3) |
+				   lpf_40;
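+		/* Program the per-core RX->TX LUT entries (offsets 0x152-0x159,
+		 * cores spaced 16 apart) with the new cap/LPF values. */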
+		for (core = 0; core < 2; core++) {
+			b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
+				       rx2tx_lut_20_11b);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
+				       rx2tx_lut_20_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
+				       rx2tx_lut_20_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
+				       rx2tx_lut_40_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
+				       rx2tx_lut_40_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
+				       rx2tx_lut_40_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
+				       rx2tx_lut_40_11n);
+			b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
+				       rx2tx_lut_40_11n);
+		}
+		b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
+	}
+	b43_phy_write(dev, 0x32F, 0x3);
+	if (phy->radio_rev == 4 || phy->radio_rev == 6)
+		b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);
+
+	if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
+		if (sprom->revision &&
+		    sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
+			b43_radio_write(dev, 0x5, 0x05);
+			b43_radio_write(dev, 0x6, 0x30);
+			b43_radio_write(dev, 0x7, 0x00);
+			b43_radio_set(dev, 0x4f, 0x1);
+			b43_radio_set(dev, 0xd4, 0x1);
+			bias = 0x1f;
+			conv = 0x6f;
+			filt = 0xaa;
+		} else {
+			bias = 0x2b;
+			conv = 0x7f;
+			filt = 0xee;
+		}
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			for (core = 0; core < 2; core++) {
+				if (core == 0) {
+					b43_radio_write(dev, 0x5F, bias);
+					b43_radio_write(dev, 0x64, conv);
+					b43_radio_write(dev, 0x66, filt);
+				} else {
+					b43_radio_write(dev, 0xE8, bias);
+					b43_radio_write(dev, 0xE9, conv);
+					b43_radio_write(dev, 0xEB, filt);
+				}
+			}
+		}
+	}
+
+	if (b43_nphy_ipa(dev)) {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+			    phy->radio_rev == 6) {
+				for (core = 0; core < 2; core++) {
+					if (core == 0)
+						b43_radio_write(dev, 0x51,
+								0x7f);
+					else
+						b43_radio_write(dev, 0xd6,
+								0x7f);
+				}
+			}
+			if (phy->radio_rev == 3) {
+				for (core = 0; core < 2; core++) {
+					if (core == 0) {
+						b43_radio_write(dev, 0x64,
+								0x13);
+						b43_radio_write(dev, 0x5F,
+								0x1F);
+						b43_radio_write(dev, 0x66,
+								0xEE);
+						b43_radio_write(dev, 0x59,
+								0x8A);
+						b43_radio_write(dev, 0x80,
+								0x3E);
+					} else {
+						b43_radio_write(dev, 0x69,
+								0x13);
+						b43_radio_write(dev, 0xE8,
+								0x1F);
+						b43_radio_write(dev, 0xEB,
+								0xEE);
+						b43_radio_write(dev, 0xDE,
+								0x8A);
+						b43_radio_write(dev, 0x105,
+								0x3E);
+					}
+				}
+			} else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+				if (!phy->is_40mhz) {
+					b43_radio_write(dev, 0x5F, 0x14);
+					b43_radio_write(dev, 0xE8, 0x12);
+				} else {
+					b43_radio_write(dev, 0x5F, 0x16);
+					b43_radio_write(dev, 0xE8, 0x16);
+				}
+			}
+		} else {
+			u16 freq = phy->channel_freq;
+			if ((freq >= 5180 && freq <= 5230) ||
+			    (freq >= 5745 && freq <= 5805)) {
+				b43_radio_write(dev, 0x7D, 0xFF);
+				b43_radio_write(dev, 0xFE, 0xFF);
+			}
+		}
+	} else {
+		if (phy->radio_rev != 5) {
+			for (core = 0; core < 2; core++) {
+				if (core == 0) {
+					b43_radio_write(dev, 0x5c, 0x61);
+					b43_radio_write(dev, 0x51, 0x70);
+				} else {
+					b43_radio_write(dev, 0xe1, 0x61);
+					b43_radio_write(dev, 0xd6, 0x70);
+				}
+			}
+		}
+	}
+
+	if (phy->radio_rev == 4) {
+		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+		for (core = 0; core < 2; core++) {
+			if (core == 0) {
+				b43_radio_write(dev, 0x1a1, 0x00);
+				b43_radio_write(dev, 0x1a2, 0x3f);
+				b43_radio_write(dev, 0x1a6, 0x3f);
+			} else {
+				b43_radio_write(dev, 0x1a7, 0x00);
+				b43_radio_write(dev, 0x1ab, 0x3f);
+				b43_radio_write(dev, 0x1ac, 0x3f);
+			}
+		}
+	} else {
+		b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
+		b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);
+
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
+		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
+		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
+	}
+
+	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
+
+	b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+	b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+	b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
+	b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
+
+	if (!phy->is_40mhz) {
+		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
+		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
+	} else {
+		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
+		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
+	}
+
+	b43_nphy_gain_ctl_workarounds(dev);
+
+	/* TODO
+	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
+			    aux_adc_vmid_rev7_core0);
+	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
+			    aux_adc_vmid_rev7_core1);
+	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
+			    aux_adc_gain_rev7);
+	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
+			    aux_adc_gain_rev7);
+	*/
+}
+
 static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 {
 	struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@
 			rx2tx_delays[6] = 1;
 			rx2tx_events[7] = 0x1F;
 		}
-		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
 					 ARRAY_SIZE(rx2tx_events));
 	}
 
@@ -1926,8 +2492,13 @@
 
 	b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
 
-	b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
-	b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+	if (!dev->phy.is_40mhz) {
+		b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+		b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+	} else {
+		b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
+		b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
+	}
 
 	b43_nphy_gain_ctl_workarounds(dev);
 
@@ -1963,13 +2534,14 @@
 	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
 	if (dev->phy.rev == 4 &&
-		b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
 		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
 				0x70);
 		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
 				0x70);
 	}
 
+	/* Dropped probably-always-true condition */
 	b43_phy_write(dev, 0x224, 0x03eb);
 	b43_phy_write(dev, 0x225, 0x03eb);
 	b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@
 	b43_phy_write(dev, 0x22d, 0x042b);
 	b43_phy_write(dev, 0x22e, 0x0381);
 	b43_phy_write(dev, 0x22f, 0x0381);
+
+	if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+		; /* TODO: 0x0080000000000000 HF */
 }
 
 static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@
 	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
 	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
+	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
+	    dev->dev->board_type == 0x8B) {
+		delays1[0] = 0x1;
+		delays1[5] = 0x14;
+	}
+
 	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
 	    nphy->band5g_pwrgain) {
 		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@
 
 	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
 	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
-	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
-	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+	if (dev->phy.rev < 3) {
+		b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+	}
 
 	if (dev->phy.rev < 2) {
 		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@
 	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
 	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
 
-	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
-	    dev->dev->board_type == 0x8B) {
-		delays1[0] = 0x1;
-		delays1[5] = 0x14;
-	}
 	b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
 	b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
 
@@ -2055,11 +2633,13 @@
 	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
 	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
 
-	b43_phy_mask(dev, B43_NPHY_PIL_DW1,
-			~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+	if (dev->phy.rev < 3) {
+		b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+			     ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+		b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+		b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+		b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+	}
 
 	if (dev->phy.rev == 2)
 		b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@
 	b43_phy_set(dev, B43_NPHY_IQFLIP,
 		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
-	if (dev->phy.rev >= 3)
+	if (dev->phy.rev >= 7)
+		b43_nphy_workarounds_rev7plus(dev);
+	else if (dev->phy.rev >= 3)
 		b43_nphy_workarounds_rev3plus(dev);
 	else
 		b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@
 		b43_nphy_ipa_internal_tssi_setup(dev);
 
 	if (phy->rev >= 7)
-		; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+		b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
 	else if (phy->rev >= 3)
 		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
 
@@ -2554,7 +3136,7 @@
 	b43_nphy_rssi_select(dev, 0, 0);
 
 	if (phy->rev >= 7)
-		; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+		b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
 	else if (phy->rev >= 3)
 		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
 
@@ -4761,6 +5343,7 @@
 	nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
 	nphy->spur_avoid = (phy->rev >= 3) ?
 				B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
+	nphy->init_por = true;
 	nphy->gain_boost = true; /* this way we follow wl, assume it is true */
 	nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
 	nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@
 		nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
 		nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
 	}
+
+	nphy->init_por = true;
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@
 	if (blocked) {
 		b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
 				~B43_NPHY_RFCTL_CMD_CHIP0PU);
-		if (dev->phy.rev >= 3) {
+		if (dev->phy.rev >= 7) {
+			/* TODO */
+		} else if (dev->phy.rev >= 3) {
 			b43_radio_mask(dev, 0x09, ~0x2);
 
 			b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@
 			b43_radio_write(dev, 0x3064, 0);
 		}
 	} else {
-		if (dev->phy.rev >= 3) {
+		if (dev->phy.rev >= 7) {
+			b43_radio_2057_init(dev);
+			b43_switch_channel(dev, dev->phy.channel);
+		} else if (dev->phy.rev >= 3) {
 			b43_radio_init2056(dev);
 			b43_switch_channel(dev, dev->phy.channel);
 		} else {
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fd12b38..092c014 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -785,6 +785,7 @@
 	u16 papd_epsilon_offset[2];
 	s32 preamble_override;
 	u32 bb_mult_save;
+	bool init_por;
 
 	bool gain_boost;
 	bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644
index 0000000..d61d683
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.c
@@ -0,0 +1,141 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11n 2057 radio device data tables
+
+  Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2057.h"
+#include "phy_common.h"
+
+static u16 r2057_rev4_init[42][2] = {
+	{ 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
+	{ 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
+	{ 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
+	{ 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
+	{ 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
+	{ 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
+	{ 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+	{ 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+};
+
+static u16 r2057_rev5_init[44][2] = {
+	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+	{ 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+	{ 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
+	{ 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
+	{ 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev5a_init[45][2] = {
+	{ 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+	{ 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
+	{ 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
+	{ 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
+	{ 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev7_init[54][2] = {
+	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
+	{ 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
+	{ 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
+	{ 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
+	{ 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
+	{ 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
+	{ 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+static u16 r2057_rev8_init[54][2] = {
+	{ 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
+	{ 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
+	{ 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+	{ 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
+	{ 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
+	{ 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
+	{ 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+void r2057_upload_inittabs(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	u16 *table = NULL;
+	u16 size, i;
+
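+	/* Each init table is a flat list of (register, value) pairs */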
+	if (phy->rev == 7) {
+		table = r2057_rev4_init[0];
+		size = ARRAY_SIZE(r2057_rev4_init);
+	} else if (phy->rev == 8 || phy->rev == 9) {
+		if (phy->radio_rev == 5) {
+			if (phy->rev == 8) {
+				table = r2057_rev5_init[0];
+				size = ARRAY_SIZE(r2057_rev5_init);
+			} else {
+				table = r2057_rev5a_init[0];
+				size = ARRAY_SIZE(r2057_rev5a_init);
+			}
+		} else if (phy->radio_rev == 7) {
+			table = r2057_rev7_init[0];
+			size = ARRAY_SIZE(r2057_rev7_init);
+		} else if (phy->radio_rev == 9) {
+			table = r2057_rev8_init[0];
+			size = ARRAY_SIZE(r2057_rev8_init);
+		}
+	}
+
+	if (table) {
+		for (i = 0; i < size; i++) {
+			pr_info("radio_write 0x%X ", *table);
+			table++;
+			pr_info("0x%X\n", *table);
+			table++;
+		}
+	}
+}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644
index 0000000..eeebd8f
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.h
@@ -0,0 +1,430 @@
+#ifndef B43_RADIO_2057_H_
+#define B43_RADIO_2057_H_
+
+#include <linux/types.h>
+
+#include "tables_nphy.h"
+
+#define R2057_DACBUF_VINCM_CORE0		0x000
+#define R2057_IDCODE				0x001
+#define R2057_RCCAL_MASTER			0x002
+#define R2057_RCCAL_CAP_SIZE			0x003
+#define R2057_RCAL_CONFIG			0x004
+#define R2057_GPAIO_CONFIG			0x005
+#define R2057_GPAIO_SEL1			0x006
+#define R2057_GPAIO_SEL0			0x007
+#define R2057_CLPO_CONFIG			0x008
+#define R2057_BANDGAP_CONFIG			0x009
+#define R2057_BANDGAP_RCAL_TRIM			0x00a
+#define R2057_AFEREG_CONFIG			0x00b
+#define R2057_TEMPSENSE_CONFIG			0x00c
+#define R2057_XTAL_CONFIG1			0x00d
+#define R2057_XTAL_ICORE_SIZE			0x00e
+#define R2057_XTAL_BUF_SIZE			0x00f
+#define R2057_XTAL_PULLCAP_SIZE			0x010
+#define R2057_RFPLL_MASTER			0x011
+#define R2057_VCOMONITOR_VTH_L			0x012
+#define R2057_VCOMONITOR_VTH_H			0x013
+#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT	0x014
+#define R2057_VCO_VARCSIZE_IDAC			0x015
+#define R2057_VCOCAL_COUNTVAL0			0x016
+#define R2057_VCOCAL_COUNTVAL1			0x017
+#define R2057_VCOCAL_INTCLK_COUNT		0x018
+#define R2057_VCOCAL_MASTER			0x019
+#define R2057_VCOCAL_NUMCAPCHANGE		0x01a
+#define R2057_VCOCAL_WINSIZE			0x01b
+#define R2057_VCOCAL_DELAY_AFTER_REFRESH	0x01c
+#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP	0x01d
+#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP	0x01e
+#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP	0x01f
+#define R2057_VCO_FORCECAPEN_FORCECAP1		0x020
+#define R2057_VCO_FORCECAP0			0x021
+#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE	0x022
+#define R2057_RFPLL_PFD_RESET_PW		0x023
+#define R2057_RFPLL_LOOPFILTER_R2		0x024
+#define R2057_RFPLL_LOOPFILTER_R1		0x025
+#define R2057_RFPLL_LOOPFILTER_C3		0x026
+#define R2057_RFPLL_LOOPFILTER_C2		0x027
+#define R2057_RFPLL_LOOPFILTER_C1		0x028
+#define R2057_CP_KPD_IDAC			0x029
+#define R2057_RFPLL_IDACS			0x02a
+#define R2057_RFPLL_MISC_EN			0x02b
+#define R2057_RFPLL_MMD0			0x02c
+#define R2057_RFPLL_MMD1			0x02d
+#define R2057_RFPLL_MISC_CAL_RESETN		0x02e
+#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES	0x02f
+#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE		0x030
+#define R2057_VCOCAL_READCAP0			0x031
+#define R2057_VCOCAL_READCAP1			0x032
+#define R2057_VCOCAL_STATUS			0x033
+#define R2057_LOGEN_PUS				0x034
+#define R2057_LOGEN_PTAT_RESETS			0x035
+#define R2057_VCOBUF_IDACS			0x036
+#define R2057_VCOBUF_TUNE			0x037
+#define R2057_CMOSBUF_TX2GQ_IDACS		0x038
+#define R2057_CMOSBUF_TX2GI_IDACS		0x039
+#define R2057_CMOSBUF_TX5GQ_IDACS		0x03a
+#define R2057_CMOSBUF_TX5GI_IDACS		0x03b
+#define R2057_CMOSBUF_RX2GQ_IDACS		0x03c
+#define R2057_CMOSBUF_RX2GI_IDACS		0x03d
+#define R2057_CMOSBUF_RX5GQ_IDACS		0x03e
+#define R2057_CMOSBUF_RX5GI_IDACS		0x03f
+#define R2057_LOGEN_MX2G_IDACS			0x040
+#define R2057_LOGEN_MX2G_TUNE			0x041
+#define R2057_LOGEN_MX5G_IDACS			0x042
+#define R2057_LOGEN_MX5G_TUNE			0x043
+#define R2057_LOGEN_MX5G_RCCR			0x044
+#define R2057_LOGEN_INDBUF2G_IDAC		0x045
+#define R2057_LOGEN_INDBUF2G_IBOOST		0x046
+#define R2057_LOGEN_INDBUF2G_TUNE		0x047
+#define R2057_LOGEN_INDBUF5G_IDAC		0x048
+#define R2057_LOGEN_INDBUF5G_IBOOST		0x049
+#define R2057_LOGEN_INDBUF5G_TUNE		0x04a
+#define R2057_CMOSBUF_TX_RCCR			0x04b
+#define R2057_CMOSBUF_RX_RCCR			0x04c
+#define R2057_LOGEN_SEL_PKDET			0x04d
+#define R2057_CMOSBUF_SHAREIQ_PTAT		0x04e
+#define R2057_RXTXBIAS_CONFIG_CORE0		0x04f
+#define R2057_TXGM_TXRF_PUS_CORE0		0x050
+#define R2057_TXGM_IDAC_BLEED_CORE0		0x051
+#define R2057_TXGM_GAIN_CORE0			0x056
+#define R2057_TXGM2G_PKDET_PUS_CORE0		0x057
+#define R2057_PAD2G_PTATS_CORE0			0x058
+#define R2057_PAD2G_IDACS_CORE0			0x059
+#define R2057_PAD2G_BOOST_PU_CORE0		0x05a
+#define R2057_PAD2G_CASCV_GAIN_CORE0		0x05b
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0	0x05c
+#define R2057_TXMIX2G_LODC_CORE0		0x05d
+#define R2057_PAD2G_TUNE_PUS_CORE0		0x05e
+#define R2057_IPA2G_GAIN_CORE0			0x05f
+#define R2057_TSSI2G_SPARE1_CORE0		0x060
+#define R2057_TSSI2G_SPARE2_CORE0		0x061
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0	0x062
+#define R2057_IPA2G_IMAIN_CORE0			0x063
+#define R2057_IPA2G_CASCONV_CORE0		0x064
+#define R2057_IPA2G_CASCOFFV_CORE0		0x065
+#define R2057_IPA2G_BIAS_FILTER_CORE0		0x066
+#define R2057_TX5G_PKDET_CORE0			0x069
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE0		0x06a
+#define R2057_PAD5G_PTATS1_CORE0		0x06b
+#define R2057_PAD5G_CLASS_PTATS2_CORE0		0x06c
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0		0x06d
+#define R2057_PAD5G_CASCV_IMAIN_CORE0		0x06e
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0	0x06f
+#define R2057_PGA_BOOST_TUNE_CORE0		0x070
+#define R2057_PGA_GAIN_CORE0			0x071
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0	0x072
+#define R2057_TXMIX5G_BOOST_TUNE_CORE0		0x073
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE0		0x074
+#define R2057_IPA5G_IAUX_CORE0			0x075
+#define R2057_IPA5G_GAIN_CORE0			0x076
+#define R2057_TSSI5G_SPARE1_CORE0		0x077
+#define R2057_TSSI5G_SPARE2_CORE0		0x078
+#define R2057_IPA5G_CASCOFFV_PU_CORE0		0x079
+#define R2057_IPA5G_PTAT_CORE0			0x07a
+#define R2057_IPA5G_IMAIN_CORE0			0x07b
+#define R2057_IPA5G_CASCONV_CORE0		0x07c
+#define R2057_IPA5G_BIAS_FILTER_CORE0		0x07d
+#define R2057_PAD_BIAS_FILTER_BWS_CORE0		0x080
+#define R2057_TR2G_CONFIG1_CORE0_NU		0x081
+#define R2057_TR2G_CONFIG2_CORE0_NU		0x082
+#define R2057_LNA5G_RFEN_CORE0			0x083
+#define R2057_TR5G_CONFIG2_CORE0_NU		0x084
+#define R2057_RXRFBIAS_IBOOST_PU_CORE0		0x085
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0	0x086
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0	0x087
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0	0x088
+#define R2057_RXMIX_CMFBITAIL_PU_CORE0		0x089
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE0		0x08a
+#define R2057_LNA2_IAUX_PTAT_CORE0		0x08b
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE0		0x08c
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0	0x08d
+#define R2057_RXRFBIAS_BANDSEL_CORE0		0x08e
+#define R2057_TIA_CONFIG_CORE0			0x08f
+#define R2057_TIA_IQGAIN_CORE0			0x090
+#define R2057_TIA_IBIAS2_CORE0			0x091
+#define R2057_TIA_IBIAS1_CORE0			0x092
+#define R2057_TIA_SPARE_Q_CORE0			0x093
+#define R2057_TIA_SPARE_I_CORE0			0x094
+#define R2057_RXMIX2G_PUS_CORE0			0x095
+#define R2057_RXMIX2G_VCMREFS_CORE0		0x096
+#define R2057_RXMIX2G_LODC_QI_CORE0		0x097
+#define R2057_W12G_BW_LNA2G_PUS_CORE0		0x098
+#define R2057_LNA2G_GAIN_CORE0			0x099
+#define R2057_LNA2G_TUNE_CORE0			0x09a
+#define R2057_RXMIX5G_PUS_CORE0			0x09b
+#define R2057_RXMIX5G_VCMREFS_CORE0		0x09c
+#define R2057_RXMIX5G_LODC_QI_CORE0		0x09d
+#define R2057_W15G_BW_LNA5G_PUS_CORE0		0x09e
+#define R2057_LNA5G_GAIN_CORE0			0x09f
+#define R2057_LNA5G_TUNE_CORE0			0x0a0
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0	0x0a1
+#define R2057_RXBB_BIAS_MASTER_CORE0		0x0a2
+#define R2057_RXBB_VGABUF_IDACS_CORE0		0x0a3
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0	0x0a4
+#define R2057_TXBUF_VINCM_CORE0			0x0a5
+#define R2057_TXBUF_IDACS_CORE0			0x0a6
+#define R2057_LPF_RESP_RXBUF_BW_CORE0		0x0a7
+#define R2057_RXBB_CC_CORE0			0x0a8
+#define R2057_RXBB_SPARE3_CORE0			0x0a9
+#define R2057_RXBB_RCCAL_HPC_CORE0		0x0aa
+#define R2057_LPF_IDACS_CORE0			0x0ab
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0	0x0ac
+#define R2057_TXBUF_GAIN_CORE0			0x0ad
+#define R2057_AFELOOPBACK_AACI_RESP_CORE0	0x0ae
+#define R2057_RXBUF_DEGEN_CORE0			0x0af
+#define R2057_RXBB_SPARE2_CORE0			0x0b0
+#define R2057_RXBB_SPARE1_CORE0			0x0b1
+#define R2057_RSSI_MASTER_CORE0			0x0b2
+#define R2057_W2_MASTER_CORE0			0x0b3
+#define R2057_NB_MASTER_CORE0			0x0b4
+#define R2057_W2_IDACS0_Q_CORE0			0x0b5
+#define R2057_W2_IDACS1_Q_CORE0			0x0b6
+#define R2057_W2_IDACS0_I_CORE0			0x0b7
+#define R2057_W2_IDACS1_I_CORE0			0x0b8
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0	0x0b9
+#define R2057_NB_IDACS_Q_CORE0			0x0ba
+#define R2057_NB_IDACS_I_CORE0			0x0bb
+#define R2057_BACKUP4_CORE0			0x0c1
+#define R2057_BACKUP3_CORE0			0x0c2
+#define R2057_BACKUP2_CORE0			0x0c3
+#define R2057_BACKUP1_CORE0			0x0c4
+#define R2057_SPARE16_CORE0			0x0c5
+#define R2057_SPARE15_CORE0			0x0c6
+#define R2057_SPARE14_CORE0			0x0c7
+#define R2057_SPARE13_CORE0			0x0c8
+#define R2057_SPARE12_CORE0			0x0c9
+#define R2057_SPARE11_CORE0			0x0ca
+#define R2057_TX2G_BIAS_RESETS_CORE0		0x0cb
+#define R2057_TX5G_BIAS_RESETS_CORE0		0x0cc
+#define R2057_IQTEST_SEL_PU			0x0cd
+#define R2057_XTAL_CONFIG2			0x0ce
+#define R2057_BUFS_MISC_LPFBW_CORE0		0x0cf
+#define R2057_TXLPF_RCCAL_CORE0			0x0d0
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0	0x0d1
+#define R2057_LPF_GAIN_CORE0			0x0d2
+#define R2057_DACBUF_IDACS_BW_CORE0		0x0d3
+#define R2057_RXTXBIAS_CONFIG_CORE1		0x0d4
+#define R2057_TXGM_TXRF_PUS_CORE1		0x0d5
+#define R2057_TXGM_IDAC_BLEED_CORE1		0x0d6
+#define R2057_TXGM_GAIN_CORE1			0x0db
+#define R2057_TXGM2G_PKDET_PUS_CORE1		0x0dc
+#define R2057_PAD2G_PTATS_CORE1			0x0dd
+#define R2057_PAD2G_IDACS_CORE1			0x0de
+#define R2057_PAD2G_BOOST_PU_CORE1		0x0df
+#define R2057_PAD2G_CASCV_GAIN_CORE1		0x0e0
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1	0x0e1
+#define R2057_TXMIX2G_LODC_CORE1		0x0e2
+#define R2057_PAD2G_TUNE_PUS_CORE1		0x0e3
+#define R2057_IPA2G_GAIN_CORE1			0x0e4
+#define R2057_TSSI2G_SPARE1_CORE1		0x0e5
+#define R2057_TSSI2G_SPARE2_CORE1		0x0e6
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1	0x0e7
+#define R2057_IPA2G_IMAIN_CORE1			0x0e8
+#define R2057_IPA2G_CASCONV_CORE1		0x0e9
+#define R2057_IPA2G_CASCOFFV_CORE1		0x0ea
+#define R2057_IPA2G_BIAS_FILTER_CORE1		0x0eb
+#define R2057_TX5G_PKDET_CORE1			0x0ee
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE1		0x0ef
+#define R2057_PAD5G_PTATS1_CORE1		0x0f0
+#define R2057_PAD5G_CLASS_PTATS2_CORE1		0x0f1
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1		0x0f2
+#define R2057_PAD5G_CASCV_IMAIN_CORE1		0x0f3
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1	0x0f4
+#define R2057_PGA_BOOST_TUNE_CORE1		0x0f5
+#define R2057_PGA_GAIN_CORE1			0x0f6
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1	0x0f7
+#define R2057_TXMIX5G_BOOST_TUNE_CORE1		0x0f8
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE1		0x0f9
+#define R2057_IPA5G_IAUX_CORE1			0x0fa
+#define R2057_IPA5G_GAIN_CORE1			0x0fb
+#define R2057_TSSI5G_SPARE1_CORE1		0x0fc
+#define R2057_TSSI5G_SPARE2_CORE1		0x0fd
+#define R2057_IPA5G_CASCOFFV_PU_CORE1		0x0fe
+#define R2057_IPA5G_PTAT_CORE1			0x0ff
+#define R2057_IPA5G_IMAIN_CORE1			0x100
+#define R2057_IPA5G_CASCONV_CORE1		0x101
+#define R2057_IPA5G_BIAS_FILTER_CORE1		0x102
+#define R2057_PAD_BIAS_FILTER_BWS_CORE1		0x105
+#define R2057_TR2G_CONFIG1_CORE1_NU		0x106
+#define R2057_TR2G_CONFIG2_CORE1_NU		0x107
+#define R2057_LNA5G_RFEN_CORE1			0x108
+#define R2057_TR5G_CONFIG2_CORE1_NU		0x109
+#define R2057_RXRFBIAS_IBOOST_PU_CORE1		0x10a
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1	0x10b
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1	0x10c
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1	0x10d
+#define R2057_RXMIX_CMFBITAIL_PU_CORE1		0x10e
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE1		0x10f
+#define R2057_LNA2_IAUX_PTAT_CORE1		0x110
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE1		0x111
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1	0x112
+#define R2057_RXRFBIAS_BANDSEL_CORE1		0x113
+#define R2057_TIA_CONFIG_CORE1			0x114
+#define R2057_TIA_IQGAIN_CORE1			0x115
+#define R2057_TIA_IBIAS2_CORE1			0x116
+#define R2057_TIA_IBIAS1_CORE1			0x117
+#define R2057_TIA_SPARE_Q_CORE1			0x118
+#define R2057_TIA_SPARE_I_CORE1			0x119
+#define R2057_RXMIX2G_PUS_CORE1			0x11a
+#define R2057_RXMIX2G_VCMREFS_CORE1		0x11b
+#define R2057_RXMIX2G_LODC_QI_CORE1		0x11c
+#define R2057_W12G_BW_LNA2G_PUS_CORE1		0x11d
+#define R2057_LNA2G_GAIN_CORE1			0x11e
+#define R2057_LNA2G_TUNE_CORE1			0x11f
+#define R2057_RXMIX5G_PUS_CORE1			0x120
+#define R2057_RXMIX5G_VCMREFS_CORE1		0x121
+#define R2057_RXMIX5G_LODC_QI_CORE1		0x122
+#define R2057_W15G_BW_LNA5G_PUS_CORE1		0x123
+#define R2057_LNA5G_GAIN_CORE1			0x124
+#define R2057_LNA5G_TUNE_CORE1			0x125
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1	0x126
+#define R2057_RXBB_BIAS_MASTER_CORE1		0x127
+#define R2057_RXBB_VGABUF_IDACS_CORE1		0x128
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1	0x129
+#define R2057_TXBUF_VINCM_CORE1			0x12a
+#define R2057_TXBUF_IDACS_CORE1			0x12b
+#define R2057_LPF_RESP_RXBUF_BW_CORE1		0x12c
+#define R2057_RXBB_CC_CORE1			0x12d
+#define R2057_RXBB_SPARE3_CORE1			0x12e
+#define R2057_RXBB_RCCAL_HPC_CORE1		0x12f
+#define R2057_LPF_IDACS_CORE1			0x130
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1	0x131
+#define R2057_TXBUF_GAIN_CORE1			0x132
+#define R2057_AFELOOPBACK_AACI_RESP_CORE1	0x133
+#define R2057_RXBUF_DEGEN_CORE1			0x134
+#define R2057_RXBB_SPARE2_CORE1			0x135
+#define R2057_RXBB_SPARE1_CORE1			0x136
+#define R2057_RSSI_MASTER_CORE1			0x137
+#define R2057_W2_MASTER_CORE1			0x138
+#define R2057_NB_MASTER_CORE1			0x139
+#define R2057_W2_IDACS0_Q_CORE1			0x13a
+#define R2057_W2_IDACS1_Q_CORE1			0x13b
+#define R2057_W2_IDACS0_I_CORE1			0x13c
+#define R2057_W2_IDACS1_I_CORE1			0x13d
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1	0x13e
+#define R2057_NB_IDACS_Q_CORE1			0x13f
+#define R2057_NB_IDACS_I_CORE1			0x140
+#define R2057_BACKUP4_CORE1			0x146
+#define R2057_BACKUP3_CORE1			0x147
+#define R2057_BACKUP2_CORE1			0x148
+#define R2057_BACKUP1_CORE1			0x149
+#define R2057_SPARE16_CORE1			0x14a
+#define R2057_SPARE15_CORE1			0x14b
+#define R2057_SPARE14_CORE1			0x14c
+#define R2057_SPARE13_CORE1			0x14d
+#define R2057_SPARE12_CORE1			0x14e
+#define R2057_SPARE11_CORE1			0x14f
+#define R2057_TX2G_BIAS_RESETS_CORE1		0x150
+#define R2057_TX5G_BIAS_RESETS_CORE1		0x151
+#define R2057_SPARE8_CORE1			0x152
+#define R2057_SPARE7_CORE1			0x153
+#define R2057_BUFS_MISC_LPFBW_CORE1		0x154
+#define R2057_TXLPF_RCCAL_CORE1			0x155
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1	0x156
+#define R2057_LPF_GAIN_CORE1			0x157
+#define R2057_DACBUF_IDACS_BW_CORE1		0x158
+#define R2057_DACBUF_VINCM_CORE1		0x159
+#define R2057_RCCAL_START_R1_Q1_P1		0x15a
+#define R2057_RCCAL_X1				0x15b
+#define R2057_RCCAL_TRC0			0x15c
+#define R2057_RCCAL_TRC1			0x15d
+#define R2057_RCCAL_DONE_OSCCAP			0x15e
+#define R2057_RCCAL_N0_0			0x15f
+#define R2057_RCCAL_N0_1			0x160
+#define R2057_RCCAL_N1_0			0x161
+#define R2057_RCCAL_N1_1			0x162
+#define R2057_RCAL_STATUS			0x163
+#define R2057_XTALPUOVR_PINCTRL			0x164
+#define R2057_OVR_REG0				0x165
+#define R2057_OVR_REG1				0x166
+#define R2057_OVR_REG2				0x167
+#define R2057_OVR_REG3				0x168
+#define R2057_OVR_REG4				0x169
+#define R2057_RCCAL_SCAP_VAL			0x16a
+#define R2057_RCCAL_BCAP_VAL			0x16b
+#define R2057_RCCAL_HPC_VAL			0x16c
+#define R2057_RCCAL_OVERRIDES			0x16d
+#define R2057_TX0_IQCAL_GAIN_BW			0x170
+#define R2057_TX0_LOFT_FINE_I			0x171
+#define R2057_TX0_LOFT_FINE_Q			0x172
+#define R2057_TX0_LOFT_COARSE_I			0x173
+#define R2057_TX0_LOFT_COARSE_Q			0x174
+#define R2057_TX0_TX_SSI_MASTER			0x175
+#define R2057_TX0_IQCAL_VCM_HG			0x176
+#define R2057_TX0_IQCAL_IDAC			0x177
+#define R2057_TX0_TSSI_VCM			0x178
+#define R2057_TX0_TX_SSI_MUX			0x179
+#define R2057_TX0_TSSIA				0x17a
+#define R2057_TX0_TSSIG				0x17b
+#define R2057_TX0_TSSI_MISC1			0x17c
+#define R2057_TX0_TXRXCOUPLE_2G_ATTEN		0x17d
+#define R2057_TX0_TXRXCOUPLE_2G_PWRUP		0x17e
+#define R2057_TX0_TXRXCOUPLE_5G_ATTEN		0x17f
+#define R2057_TX0_TXRXCOUPLE_5G_PWRUP		0x180
+#define R2057_TX1_IQCAL_GAIN_BW			0x190
+#define R2057_TX1_LOFT_FINE_I			0x191
+#define R2057_TX1_LOFT_FINE_Q			0x192
+#define R2057_TX1_LOFT_COARSE_I			0x193
+#define R2057_TX1_LOFT_COARSE_Q			0x194
+#define R2057_TX1_TX_SSI_MASTER			0x195
+#define R2057_TX1_IQCAL_VCM_HG			0x196
+#define R2057_TX1_IQCAL_IDAC			0x197
+#define R2057_TX1_TSSI_VCM			0x198
+#define R2057_TX1_TX_SSI_MUX			0x199
+#define R2057_TX1_TSSIA				0x19a
+#define R2057_TX1_TSSIG				0x19b
+#define R2057_TX1_TSSI_MISC1			0x19c
+#define R2057_TX1_TXRXCOUPLE_2G_ATTEN		0x19d
+#define R2057_TX1_TXRXCOUPLE_2G_PWRUP		0x19e
+#define R2057_TX1_TXRXCOUPLE_5G_ATTEN		0x19f
+#define R2057_TX1_TXRXCOUPLE_5G_PWRUP		0x1a0
+#define R2057_AFE_VCM_CAL_MASTER_CORE0		0x1a1
+#define R2057_AFE_SET_VCM_I_CORE0		0x1a2
+#define R2057_AFE_SET_VCM_Q_CORE0		0x1a3
+#define R2057_AFE_STATUS_VCM_IQADC_CORE0	0x1a4
+#define R2057_AFE_STATUS_VCM_I_CORE0		0x1a5
+#define R2057_AFE_STATUS_VCM_Q_CORE0		0x1a6
+#define R2057_AFE_VCM_CAL_MASTER_CORE1		0x1a7
+#define R2057_AFE_SET_VCM_I_CORE1		0x1a8
+#define R2057_AFE_SET_VCM_Q_CORE1		0x1a9
+#define R2057_AFE_STATUS_VCM_IQADC_CORE1	0x1aa
+#define R2057_AFE_STATUS_VCM_I_CORE1		0x1ab
+#define R2057_AFE_STATUS_VCM_Q_CORE1		0x1ac
+
+#define R2057v7_DACBUF_VINCM_CORE0		0x1ad
+#define R2057v7_RCCAL_MASTER			0x1ae
+#define R2057v7_TR2G_CONFIG3_CORE0_NU		0x1af
+#define R2057v7_TR2G_CONFIG3_CORE1_NU		0x1b0
+#define R2057v7_LOGEN_PUS1			0x1b1
+#define R2057v7_OVR_REG5			0x1b2
+#define R2057v7_OVR_REG6			0x1b3
+#define R2057v7_OVR_REG7			0x1b4
+#define R2057v7_OVR_REG8			0x1b5
+#define R2057v7_OVR_REG9			0x1b6
+#define R2057v7_OVR_REG10			0x1b7
+#define R2057v7_OVR_REG11			0x1b8
+#define R2057v7_OVR_REG12			0x1b9
+#define R2057v7_OVR_REG13			0x1ba
+#define R2057v7_OVR_REG14			0x1bb
+#define R2057v7_OVR_REG15			0x1bc
+#define R2057v7_OVR_REG16			0x1bd
+#define R2057v7_OVR_REG17			0x1be
+#define R2057v7_OVR_REG18			0x1bf
+#define R2057v7_OVR_REG19			0x1c0
+#define R2057v7_OVR_REG20			0x1c1
+#define R2057v7_OVR_REG21			0x1c2
+#define R2057v7_OVR_REG22			0x1c3
+#define R2057v7_OVR_REG23			0x1c4
+#define R2057v7_OVR_REG24			0x1c5
+#define R2057v7_OVR_REG25			0x1c6
+#define R2057v7_OVR_REG26			0x1c7
+#define R2057v7_OVR_REG27			0x1c8
+#define R2057v7_OVR_REG28			0x1c9
+#define R2057v7_IQTEST_SEL_PU2			0x1ca
+
+#define R2057_VCM_MASK				0x7
+
+void r2057_upload_inittabs(struct b43_wldev *dev);
+
+#endif /* B43_RADIO_2057_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index f0d8377..97d4e27 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2757,6 +2757,49 @@
 	{ 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+			tbl_rf_control_override_rev7_over0[] = {
+	{ 0x0004, 0x07A, 0x07D, 0x0002, 1 },
+	{ 0x0008, 0x07A, 0x07D, 0x0004, 2 },
+	{ 0x0010, 0x07A, 0x07D, 0x0010, 4 },
+	{ 0x0020, 0x07A, 0x07D, 0x0020, 5 },
+	{ 0x0040, 0x07A, 0x07D, 0x0040, 6 },
+	{ 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+	{ 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
+	{ 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
+	{ 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
+	{ 0x6000, 0x348, 0x349, 0xFFFF, 0 },
+	{ 0x2000, 0x348, 0x349, 0x000F, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+			tbl_rf_control_override_rev7_over1[] = {
+	{ 0x0002, 0x340, 0x341, 0x0002, 1 },
+	{ 0x0008, 0x340, 0x341, 0x0008, 3 },
+	{ 0x0020, 0x340, 0x341, 0x0020, 5 },
+	{ 0x0010, 0x340, 0x341, 0x0010, 4 },
+	{ 0x0004, 0x340, 0x341, 0x0004, 2 },
+	{ 0x0080, 0x340, 0x341, 0x0700, 8 },
+	{ 0x0800, 0x340, 0x341, 0x4000, 14 },
+	{ 0x0400, 0x340, 0x341, 0x2000, 13 },
+	{ 0x0200, 0x340, 0x341, 0x0800, 12 },
+	{ 0x0100, 0x340, 0x341, 0x0100, 11 },
+	{ 0x0040, 0x340, 0x341, 0x0040, 6 },
+	{ 0x0001, 0x340, 0x341, 0x0001, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+			tbl_rf_control_override_rev7_over2[] = {
+	{ 0x0008, 0x344, 0x345, 0x0008, 3 },
+	{ 0x0002, 0x344, 0x345, 0x0002, 1 },
+	{ 0x0001, 0x344, 0x345, 0x0001, 0 },
+	{ 0x0004, 0x344, 0x345, 0x0004, 2 },
+	{ 0x0010, 0x344, 0x345, 0x0010, 4 },
+};
+
 struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
 	{ 10, 14, 19, 27 },
 	{ -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@
 
 	return e;
 }
+
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+	struct b43_wldev *dev, u16 field, u8 override)
+{
+	const struct nphy_rf_control_override_rev7 *e;
+	u8 size, i;
+
+	switch (override) {
+	case 0:
+		e = tbl_rf_control_override_rev7_over0;
+		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
+		break;
+	case 1:
+		e = tbl_rf_control_override_rev7_over1;
+		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
+		break;
+	case 2:
+		e = tbl_rf_control_override_rev7_over2;
+		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
+		break;
+	default:
+		b43err(dev->wl, "Invalid override value %d\n", override);
+		return NULL;
+	}
+
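+	/* Linear search for the field; return NULL if it has no entry */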
+	for (i = 0; i < size; i++) {
+		if (e[i].field == field)
+			return &e[i];
+	}
+
+	return NULL;
+}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index f348953..c600700 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,14 @@
 	u8 val_addr1;
 };
 
+struct nphy_rf_control_override_rev7 {
+	u16 field;
+	u16 val_addr_core0;
+	u16 val_addr_core1;
+	u16 val_mask;
+	u8 val_shift;
+};
+
 struct nphy_gain_ctl_workaround_entry {
 	s8 lna1_gain[4];
 	s8 lna2_gain[4];
@@ -202,5 +210,7 @@
 	tbl_rf_control_override_rev2[];
 extern const struct nphy_rf_control_override_rev3
 	tbl_rf_control_override_rev3[];
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+	struct b43_wldev *dev, u16 field, u8 override);
 
 #endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8156135..291cdf6 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1920,7 +1920,7 @@
 		return 0;
 	ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
 		    (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
-		     & mask) | set);
+		     & ~mask) | set);
 
 	return 0;
 }
@@ -2492,6 +2492,7 @@
 }
 
 static void b43legacy_op_tx(struct ieee80211_hw *hw,
+			    struct ieee80211_tx_control *control,
 			    struct sk_buff *skb)
 {
 	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f..7ed7d75 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@
 {
 	struct brcms_c_info *wlc = wlc_cm->wlc;
 	struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
-	const struct ieee80211_reg_rule *reg_rule;
 	struct txpwr_limits txpwr;
-	int ret;
 
 	brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
 
@@ -393,8 +391,7 @@
 	);
 
 	/* set or restore gmode as required by regulatory */
-	ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
-	if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+	if (ch->flags & IEEE80211_CHAN_NO_OFDM)
 		brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
 	else
 		brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47..513e172 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -86,7 +86,9 @@
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
-
+/* This needs to be adjusted when brcms_firmwares changes */
+MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
+MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
 
 /* recognized BCMA Core IDs */
 static struct bcma_device_id brcms_coreid_table[] = {
@@ -121,7 +123,8 @@
 		 IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(14, 2484,
 		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+		 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
+		 IEEE80211_CHAN_NO_OFDM)
 };
 
 static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
@@ -264,7 +267,9 @@
 	}
 }
 
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw,
+			 struct ieee80211_tx_control *control,
+			 struct sk_buff *skb)
 {
 	struct brcms_info *wl = hw->priv;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -276,7 +281,7 @@
 		goto done;
 	}
 	brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
-	tx_info->rate_driver_data[0] = tx_info->control.sta;
+	tx_info->rate_driver_data[0] = control->sta;
  done:
 	spin_unlock_bh(&wl->lock);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 03ca653..75086b3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7512,15 +7512,10 @@
 
 	channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
-	if (channel > 14) {
-		rx_status->band = IEEE80211_BAND_5GHZ;
-		rx_status->freq = ieee80211_ofdm_chan_to_freq(
-					WF_CHAN_FACTOR_5_G/2, channel);
-
-	} else {
-		rx_status->band = IEEE80211_BAND_2GHZ;
-		rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
-	}
+	rx_status->band =
+		channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+	rx_status->freq =
+		ieee80211_channel_to_frequency(channel, rx_status->band);
 
 	rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
 
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index f10d302..c11a290 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -67,11 +67,6 @@
 #define WL_CHANSPEC_BAND_2G		0x2000
 #define INVCHANSPEC			255
 
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G		10000	/* 5   GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
-
 #define CHSPEC_CHANNEL(chspec)	((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
 #define CHSPEC_BAND(chspec)	((chspec) & WL_CHANSPEC_BAND_MASK)
 
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index faec404..e252acb 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -460,7 +460,9 @@
  * start C_TX command process
  */
 static int
-il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il3945_tx_skb(struct il_priv *il,
+	      struct ieee80211_sta *sta,
+	      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find idx into station table for destination station */
-	sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+	sta_id = il_sta_id_or_broadcast(il, sta);
 	if (sta_id == IL_INVALID_STATION) {
 		D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
 		goto drop;
@@ -2859,7 +2861,9 @@
 }
 
 static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+	       struct ieee80211_tx_control *control,
+	       struct sk_buff *skb)
 {
 	struct il_priv *il = hw->priv;
 
@@ -2868,7 +2872,7 @@
 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (il3945_tx_skb(il, skb))
+	if (il3945_tx_skb(il, control->sta, skb))
 		dev_kfree_skb_any(skb);
 
 	D_MAC80211("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 34f61a0..eac4dc8 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1526,8 +1526,11 @@
 }
 
 static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
-			 struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+			 struct il_tx_cmd *tx_cmd,
+			 struct ieee80211_tx_info *info,
+			 struct ieee80211_sta *sta,
+			 __le16 fc)
 {
 	const u8 rts_retry_limit = 60;
 	u32 rate_flags;
@@ -1561,9 +1564,7 @@
 	rate_idx = info->control.rates[0].idx;
 	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
 	    || rate_idx > RATE_COUNT_LEGACY)
-		rate_idx =
-		    rate_lowest_index(&il->bands[info->band],
-				      info->control.sta);
+		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 	if (info->band == IEEE80211_BAND_5GHZ)
 		rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@
  * start C_TX command process
  */
 int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+	      struct ieee80211_sta *sta,
+	      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
 	struct il_station_priv *sta_priv = NULL;
 	struct il_tx_queue *txq;
 	struct il_queue *q;
@@ -1680,7 +1682,7 @@
 		sta_id = il->hw_params.bcast_id;
 	else {
 		/* Find idx into station table for destination station */
-		sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+		sta_id = il_sta_id_or_broadcast(il, sta);
 
 		if (sta_id == IL_INVALID_STATION) {
 			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@
 	/* TODO need this for burst mode later on */
 	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
 
-	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+	il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
 	il_update_stats(il, true, fc, len);
 	/*
@@ -5828,7 +5830,9 @@
 }
 
 void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+	      struct ieee80211_tx_control *control,
+	      struct sk_buff *skb)
 {
 	struct il_priv *il = hw->priv;
 
@@ -5837,7 +5841,7 @@
 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (il4965_tx_skb(il, skb))
+	if (il4965_tx_skb(il, control->sta, skb))
 		dev_kfree_skb_any(skb);
 
 	D_MACDUMP("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1db6776..2d092f3 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -78,7 +78,9 @@
 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
 				 struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 * ssn);
 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@
 int il4965_eeprom_check_version(struct il_priv *il);
 
 /* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+		   struct ieee80211_tx_control *control,
+		   struct sk_buff *skb);
 int il4965_mac_start(struct ieee80211_hw *hw);
 void il4965_mac_stop(struct ieee80211_hw *hw);
 void il4965_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 0370403..eb99875 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4860,7 +4860,7 @@
 
 #ifdef CONFIG_PM
 
-int
+static int
 il_pci_suspend(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(il_pci_suspend);
 
-int
+static int
 il_pci_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(il_pci_resume);
 
-const struct dev_pm_ops il_pm_ops = {
-	.suspend = il_pci_suspend,
-	.resume = il_pci_resume,
-	.freeze = il_pci_suspend,
-	.thaw = il_pci_resume,
-	.poweroff = il_pci_suspend,
-	.restore = il_pci_resume,
-};
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
 EXPORT_SYMBOL(il_pm_ops);
 
 #endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 5f50177..3d3135e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1845,8 +1845,6 @@
 			  u32 beacon_interval);
 
 #ifdef CONFIG_PM
-int il_pci_suspend(struct device *device);
-int il_pci_resume(struct device *device);
 extern const struct dev_pm_ops il_pm_ops;
 
 #define IL_LEGACY_PM_OPS	(&il_pm_ops)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 9bb16bdf6..75e12f2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -201,7 +201,9 @@
 
 
 /* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
 #else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+				     struct dentry *dbgfs_dir)
 {
 	return 0;
 }
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 #ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1..ce826bc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2349,24 +2349,19 @@
  * Create the debugfs files and directories
  *
  */
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
 {
-	struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+	struct dentry *dir_data, *dir_rf, *dir_debug;
 
-	dir_drv = debugfs_create_dir(name, phyd);
-	if (!dir_drv)
-		return -ENOMEM;
+	priv->debugfs_dir = dbgfs_dir;
 
-	priv->debugfs_dir = dir_drv;
-
-	dir_data = debugfs_create_dir("data", dir_drv);
+	dir_data = debugfs_create_dir("data", dbgfs_dir);
 	if (!dir_data)
 		goto err;
-	dir_rf = debugfs_create_dir("rf", dir_drv);
+	dir_rf = debugfs_create_dir("rf", dbgfs_dir);
 	if (!dir_rf)
 		goto err;
-	dir_debug = debugfs_create_dir("debug", dir_drv);
+	dir_debug = debugfs_create_dir("debug", dbgfs_dir);
 	if (!dir_debug)
 		goto err;
 
@@ -2412,25 +2407,30 @@
 	/* Calibrations disabled/enabled status*/
 	DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-	if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
-		goto err;
+	/*
+	 * Create a symlink with mac80211. This is not very robust, as it does
+	 * not remove the symlink created. The implicit assumption is that
+	 * when the opmode exits, mac80211 will also exit, and will remove
+	 * this symlink as part of its cleanup.
+	 */
+	if (priv->mac80211_registered) {
+		char buf[100];
+		struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+		dev_dir = dbgfs_dir->d_parent;
+		root_dir = dev_dir->d_parent;
+		mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+		snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+			 dev_dir->d_name.name);
+
+		if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+			goto err;
+	}
+
 	return 0;
 
 err:
-	IWL_ERR(priv, "Can't create the debugfs directory\n");
-	iwl_dbgfs_unregister(priv);
+	IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
 	return -ENOMEM;
 }
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-	if (!priv->debugfs_dir)
-		return;
-
-	debugfs_remove_recursive(priv->debugfs_dir);
-	priv->debugfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index a5f7bce..ff8162d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -195,7 +195,7 @@
 			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
 	}
 
-	hw->wiphy->max_remain_on_channel_duration = 1000;
+	hw->wiphy->max_remain_on_channel_duration = 500;
 
 	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@
 }
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+			  struct ieee80211_tx_control *control,
+			  struct sk_buff *skb)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
 	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (iwlagn_tx_skb(priv, skb))
+	if (iwlagn_tx_skb(priv, control->sta, skb))
 		dev_kfree_skb_any(skb);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 84d3db5..7ff3f143 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -862,7 +862,8 @@
 	 * No race since we hold the mutex here and a new one
 	 * can't come in at this time.
 	 */
-	ieee80211_remain_on_channel_expired(priv->hw);
+	if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+		ieee80211_remain_on_channel_expired(priv->hw);
 
 	exit_pending =
 		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@
 		iwlagn_prepare_restart(priv);
 		mutex_unlock(&priv->mutex);
 		iwl_cancel_deferred_work(priv);
-		ieee80211_restart_hw(priv->hw);
+		if (priv->mac80211_registered)
+			ieee80211_restart_hw(priv->hw);
+		else
+			IWL_ERR(priv,
+				"Cannot request restart before registering with mac80211");
 	} else {
 		WARN_ON(1);
 	}
@@ -1222,7 +1227,8 @@
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 						 const struct iwl_cfg *cfg,
-						 const struct iwl_fw *fw)
+						 const struct iwl_fw *fw,
+						 struct dentry *dbgfs_dir)
 {
 	struct iwl_priv *priv;
 	struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@
 	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
 		goto out_destroy_workqueue;
 
-	if (iwl_dbgfs_register(priv, DRV_NAME))
-		IWL_ERR(priv,
-			"failed to create debugfs files. Ignoring error\n");
+	if (iwl_dbgfs_register(priv, dbgfs_dir))
+		goto out_mac80211_unregister;
 
 	return op_mode;
 
+out_mac80211_unregister:
+	iwlagn_mac_unregister(priv);
 out_destroy_workqueue:
+	iwl_tt_exit(priv);
+	iwl_testmode_free(priv);
+	iwl_cancel_deferred_work(priv);
 	destroy_workqueue(priv->workqueue);
 	priv->workqueue = NULL;
 	iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@
 
 	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-	iwl_dbgfs_unregister(priv);
-
 	iwl_testmode_free(priv);
 	iwlagn_mac_unregister(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 6fddd27..a82f46c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -707,11 +707,14 @@
  */
 static bool rs_use_green(struct ieee80211_sta *sta)
 {
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->ctx;
-
-	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-		!(ctx->ht.non_gf_sta_present);
+	/*
+	 * There's a bug somewhere in this code that causes the
+	 * scaling to get stuck because GF+SGI can't be combined
+	 * in SISO rates. Until we find that bug, disable GF, it
+	 * has only limited benefit and we still interoperate with
+	 * GF APs since we can always receive GF transmissions.
+	 */
+	return false;
 }
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b29b798..fe36a38 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -150,7 +150,7 @@
 		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
 	if (!(flags & CMD_ASYNC)) {
-		cmd.flags |= CMD_WANT_SKB;
+		cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
 		might_sleep();
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5971a23..f5ca73a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -127,6 +127,7 @@
 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 				     struct iwl_tx_cmd *tx_cmd,
 				     struct ieee80211_tx_info *info,
+				     struct ieee80211_sta *sta,
 				     __le16 fc)
 {
 	u32 rate_flags;
@@ -187,8 +188,7 @@
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
 			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
 		rate_idx = rate_lowest_index(
-				&priv->eeprom_data->bands[info->band],
-				info->control.sta);
+				&priv->eeprom_data->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 	if (info->band == IEEE80211_BAND_5GHZ)
 		rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@
 /*
  * start REPLY_TX command process
  */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@
 		sta_id = ctx->bcast_sta_id;
 	else {
 		/* Find index into station table for destination station */
-		sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 				       hdr->addr1);
@@ -355,8 +357,8 @@
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-	if (info->control.sta)
-		sta_priv = (void *)info->control.sta->drv_priv;
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
 
 	if (sta_priv && sta_priv->asleep &&
 	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@
 	/* TODO need this for burst mode later on */
 	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 
-	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 
 	memset(&info->status, 0, sizeof(info->status));
 
@@ -431,7 +433,7 @@
 		 * only. Check this here.
 		 */
 		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
-		    tid_data->agg.state != IWL_AGG_OFF,
+			      tid_data->agg.state != IWL_AGG_OFF,
 		    "Tx while agg.state = %d", tid_data->agg.state))
 			goto drop_unlock_sta;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index cc41cfa..48d6d44 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -101,6 +101,10 @@
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
 /**
  * struct iwl_drv - drv common data
  * @list: list of drv structures using this opmode
@@ -126,6 +130,12 @@
 	char firmware_name[25];         /* name of firmware file to load */
 
 	struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct dentry *dbgfs_drv;
+	struct dentry *dbgfs_trans;
+	struct dentry *dbgfs_op_mode;
+#endif
 };
 
 #define DVM_OP_MODE	0
@@ -194,7 +204,8 @@
 	return 0;
 }
 
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+				void *context);
 
 #define UCODE_EXPERIMENTAL_INDEX	100
 #define UCODE_EXPERIMENTAL_TAG		"exp"
@@ -231,7 +242,7 @@
 
 	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
 				       drv->trans->dev,
-				       GFP_KERNEL, drv, iwl_ucode_callback);
+				       GFP_KERNEL, drv, iwl_req_fw_callback);
 }
 
 struct fw_img_parsing {
@@ -759,13 +770,57 @@
 	return 0;
 }
 
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+	const struct iwl_op_mode_ops *ops = op->ops;
+	struct dentry *dbgfs_dir = NULL;
+	struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+						drv->dbgfs_drv);
+	if (!drv->dbgfs_op_mode) {
+		IWL_ERR(drv,
+			"failed to create opmode debugfs directory\n");
+		return op_mode;
+	}
+	dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+			 * No Tx traffic for 200 msec. Get the scan command
+			 * from the scan pending queue and move it to the cmd
+			 * pending queue to resume the scan operation.
+	if (!op_mode) {
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+	}
+#endif
+
+	return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+	/* op_mode can be NULL if its start failed */
+	if (drv->op_mode) {
+		iwl_op_mode_stop(drv->op_mode);
+		drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+#endif
+	}
+}
+
 /**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
  *
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
 	struct iwl_drv *drv = context;
 	struct iwl_fw *fw = &drv->fw;
@@ -908,8 +963,7 @@
 	list_add_tail(&drv->list, &op->drv);
 
 	if (op->ops) {
-		const struct iwl_op_mode_ops *ops = op->ops;
-		drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+		drv->op_mode = _iwl_op_mode_start(drv, op);
 
 		if (!drv->op_mode) {
 			mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1023,51 @@
 	init_completion(&drv->request_firmware_complete);
 	INIT_LIST_HEAD(&drv->list);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the device debugfs entries. */
+	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+					    iwl_dbgfs_root);
+
+	if (!drv->dbgfs_drv) {
+		IWL_ERR(drv, "failed to create debugfs directory\n");
+		goto err_free_drv;
+	}
+
+	/* Create transport layer debugfs dir */
+	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+	if (!drv->trans->dbgfs_dir) {
+		IWL_ERR(drv, "failed to create transport debugfs directory\n");
+		goto err_free_dbgfs;
+	}
+#endif
+
 	ret = iwl_request_firmware(drv, true);
 
 	if (ret) {
 		IWL_ERR(trans, "Couldn't request the fw\n");
-		kfree(drv);
-		drv = NULL;
+		goto err_fw;
 	}
 
 	return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+	debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+	kfree(drv);
+	drv = NULL;
+
+	return drv;
 }
 
 void iwl_drv_stop(struct iwl_drv *drv)
 {
 	wait_for_completion(&drv->request_firmware_complete);
 
-	/* op_mode can be NULL if its start failed */
-	if (drv->op_mode)
-		iwl_op_mode_stop(drv->op_mode);
+	_iwl_op_mode_stop(drv);
 
 	iwl_dealloc_ucode(drv);
 
@@ -1000,6 +1081,10 @@
 		list_del(&drv->list);
 	mutex_unlock(&iwlwifi_opmode_table_mtx);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
 	kfree(drv);
 }
 
@@ -1022,15 +1107,18 @@
 {
 	int i;
 	struct iwl_drv *drv;
+	struct iwlwifi_opmode_table *op;
 
 	mutex_lock(&iwlwifi_opmode_table_mtx);
 	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
-		if (strcmp(iwlwifi_opmode_table[i].name, name))
+		op = &iwlwifi_opmode_table[i];
+		if (strcmp(op->name, name))
 			continue;
-		iwlwifi_opmode_table[i].ops = ops;
-		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
-			drv->op_mode = ops->start(drv->trans, drv->cfg,
-						  &drv->fw);
+		op->ops = ops;
+		/* TODO: need to handle exceptional case */
+		list_for_each_entry(drv, &op->drv, list)
+			drv->op_mode = _iwl_op_mode_start(drv, op);
+
 		mutex_unlock(&iwlwifi_opmode_table_mtx);
 		return 0;
 	}
@@ -1051,12 +1139,9 @@
 		iwlwifi_opmode_table[i].ops = NULL;
 
 		/* call the stop routine for all devices */
-		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
-			if (drv->op_mode) {
-				iwl_op_mode_stop(drv->op_mode);
-				drv->op_mode = NULL;
-			}
-		}
+		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+			_iwl_op_mode_stop(drv);
+
 		mutex_unlock(&iwlwifi_opmode_table_mtx);
 		return;
 	}
@@ -1076,6 +1161,14 @@
 	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
 	pr_info(DRV_COPYRIGHT "\n");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the root of iwlwifi debugfs subsystem. */
+	iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+	if (!iwl_dbgfs_root)
+		return -EFAULT;
+#endif
+
 	return iwl_pci_register_driver();
 }
 module_init(iwl_drv_init);
@@ -1083,6 +1176,10 @@
 static void __exit iwl_drv_exit(void)
 {
 	iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
 }
 module_exit(iwl_drv_exit);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 2cbf137..285de5f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -90,9 +90,9 @@
  * 4) The bus specific component configures the bus
  * 5) The bus specific component calls to the drv bus agnostic part
  *    (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to match the fw
  */
 
 struct iwl_drv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 64886f9..c8d9b95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -134,7 +134,8 @@
 struct iwl_op_mode_ops {
 	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
 				     const struct iwl_cfg *cfg,
-				     const struct iwl_fw *fw);
+				     const struct iwl_fw *fw,
+				     struct dentry *dbgfs_dir);
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 92576a3..ff11542 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -184,14 +184,20 @@
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't want for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *	response.
+ *	response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ *	response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ *	copied. The pointer passed to the response handler is owned by the
+ *	transport and doesn't need to be freed by the op_mode. This also means
+ *	that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
 	CMD_SYNC = 0,
 	CMD_ASYNC = BIT(0),
 	CMD_WANT_SKB = BIT(1),
-	CMD_ON_DEMAND = BIT(2),
+	CMD_WANT_HCMD = BIT(2),
+	CMD_ON_DEMAND = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@
 	size_t dev_cmd_headroom;
 	char dev_cmd_pool_name[50];
 
+	struct dentry *dbgfs_dir;
+
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
 	char trans_specific[0] __aligned(sizeof(void *));
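For context on the new CMD_WANT_HCMD flag documented above (and used by the dvm/sta.c hunk earlier in this patch), a caller that wants both the response buffer and a copy of the command it sent would combine the flags roughly as below. This is only an illustrative sketch, not an excerpt from the series: the field names follow struct iwl_host_cmd as used in this tree, and sta_cmd stands in for whatever payload the command carries.

	struct iwl_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.flags = CMD_SYNC | CMD_WANT_SKB | CMD_WANT_HCMD,
		.data = { &sta_cmd, },		/* placeholder payload */
		.len = { sizeof(sta_cmd), },
	};
	int ret = iwl_trans_send_cmd(trans, &cmd);
	if (!ret) {
		/* cmd.resp_pkt is valid here thanks to CMD_WANT_SKB; the
		 * transport also kept a copy of the sent HCMD for the
		 * response handler because of CMD_WANT_HCMD. */
		iwl_free_resp(&cmd);	/* required whenever CMD_WANT_SKB is set */
	}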
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f4c3500..89bfb43 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -282,8 +282,14 @@
 	if (!trans_pcie->drv)
 		goto out_free_trans;
 
+	/* register transport layer debugfs here */
+	if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+		goto out_free_drv;
+
 	return 0;
 
+out_free_drv:
+	iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
 	iwl_trans_pcie_free(iwl_trans);
 	pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c5..3ef8d5a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -184,6 +184,7 @@
 
 struct iwl_pcie_tx_queue_entry {
 	struct iwl_device_cmd *cmd;
+	struct iwl_device_cmd *copy_cmd;
 	struct sk_buff *skb;
 	struct iwl_cmd_meta meta;
 };
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1..d80604a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -421,13 +421,23 @@
 		index = SEQ_TO_INDEX(sequence);
 		cmd_index = get_cmd_index(&txq->q, index);
 
-		if (reclaim)
-			cmd = txq->entries[cmd_index].cmd;
-		else
+		if (reclaim) {
+			struct iwl_pcie_tx_queue_entry *ent;
+			ent = &txq->entries[cmd_index];
+			cmd = ent->copy_cmd;
+			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+		} else {
 			cmd = NULL;
+		}
 
 		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
+		if (reclaim) {
+			/* The original command isn't needed any more */
+			kfree(txq->entries[cmd_index].copy_cmd);
+			txq->entries[cmd_index].copy_cmd = NULL;
+		}
+
 		/*
 		 * After here, we should always check rxcb._page_stolen,
 		 * if it is true then one of the handlers took the page.
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f7..38f51b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -492,10 +492,11 @@
 	iwl_tx_queue_unmap(trans, txq_id);
 
 	/* De-alloc array of command/tx buffers */
-
 	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < txq->q.n_window; i++)
+		for (i = 0; i < txq->q.n_window; i++) {
 			kfree(txq->entries[i].cmd);
+			kfree(txq->entries[i].copy_cmd);
+		}
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd) {
@@ -896,6 +897,7 @@
 static int iwl_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;
+	int t = 0;
 
 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -908,17 +910,15 @@
 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+	do {
+		ret = iwl_set_hw_ready(trans);
+		if (ret >= 0)
+			return 0;
 
-	if (ret < 0)
-		return ret;
+		usleep_range(200, 1000);
+		t += 200;
+	} while (t < 150000);
 
-	/* HW should be ready by now, check again. */
-	ret = iwl_set_hw_ready(trans);
-	if (ret >= 0)
-		return 0;
 	return ret;
 }
 
@@ -1769,7 +1769,7 @@
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
 	if (!debugfs_create_file(#name, mode, parent, trans,		\
 				 &iwl_dbgfs_##name##_ops))		\
-		return -ENOMEM;						\
+		goto err;						\
 } while (0)
 
 /* file operation */
@@ -2033,6 +2033,10 @@
 	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
 	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
 	return 0;
+
+err:
+	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+	return -ENOMEM;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6baf8de..392d2bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -521,7 +521,7 @@
 	u16 copy_size, cmd_size;
 	bool had_nocopy = false;
 	int i;
-	u8 *cmd_dest;
+	u32 cmd_pos;
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
 	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
@@ -584,15 +584,31 @@
 					 INDEX_TO_SEQ(q->write_ptr));
 
 	/* and copy the data that needs to be copied */
-
-	cmd_dest = out_cmd->payload;
+	cmd_pos = offsetof(struct iwl_device_cmd, payload);
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		if (!cmd->len[i])
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
 			break;
-		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
-		cmd_dest += cmd->len[i];
+		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+		cmd_pos += cmd->len[i];
+	}
+
+	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+	/*
+	 * since out_cmd will be the source address of the FH, it will write
+	 * the retry count there. So when the user needs to receive the HCMD
+	 * that corresponds to the response in the response handler, it needs
+	 * to set CMD_WANT_HCMD.
+	 */
+	if (cmd->flags & CMD_WANT_HCMD) {
+		txq->entries[idx].copy_cmd =
+			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+		if (unlikely(!txq->entries[idx].copy_cmd)) {
+			idx = -ENOMEM;
+			goto out;
+		}
 	}
 
 	IWL_DEBUG_HC(trans,
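Switching from a moving cmd_dest pointer to a cmd_pos byte offset (above) has a second benefit: once the copy loop stops at the first IWL_HCMD_DFL_NOCOPY chunk, cmd_pos is exactly the length the CMD_WANT_HCMD kmemdup() needs. A tiny standalone illustration of that offset-based packing, with hypothetical chunk data and a simplified command layout:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct device_cmd {
	unsigned char hdr[4];		/* stands in for the command header */
	unsigned char payload[320];
};

int main(void)
{
	struct device_cmd out = { { 0 } };
	const char *chunks[] = { "abcd", "efghij" };	/* made-up data */
	size_t lens[] = { 4, 6 };
	size_t cmd_pos = offsetof(struct device_cmd, payload);

	for (int i = 0; i < 2; i++) {
		memcpy((unsigned char *)&out + cmd_pos, chunks[i], lens[i]);
		cmd_pos += lens[i];
	}

	/* cmd_pos now equals header + copied chunks, i.e. the length a
	 * kmemdup()-style copy of the command would need. */
	printf("copy length = %zu\n", cmd_pos);	/* 4 + 4 + 6 = 14 */
	return 0;
}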
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de80..1c10b54 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@
 			netif_tx_wake_all_queues(priv->dev);
 	}
 
+	kfree(cmd);
 done:
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
 	return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caeba..e970897 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@
 		kfree(packet);
 	}
 
+	kfree(card);
 	lbs_deb_leave(LBS_DEB_SDIO);
 }
 
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 5804818..fe1ea43c 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@
 			netdev_info(dev, "Timeout submitting command 0x%04x\n",
 				    le16_to_cpu(cmdnode->cmdbuf->command));
 			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
-			if (priv->reset_card)
+
+			/* Reset card, but only when it isn't in the process
+			/* Reset the card, but only when it isn't already in
+			 * the process of being shut down. */
 				priv->reset_card(priv);
 		}
 		priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index a034572..70018562 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -227,7 +227,9 @@
 	lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
 {
 	struct lbtf_private *priv = hw->priv;
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0083839..72b0456 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -709,7 +709,9 @@
 	return ack;
 }
 
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+			      struct ieee80211_tx_control *control,
+			      struct sk_buff *skb)
 {
 	bool ack;
 	struct ieee80211_tx_info *txi;
@@ -1727,6 +1729,7 @@
 #endif
 				 BIT(NL80211_IFTYPE_AP) |
 				 BIT(NL80211_IFTYPE_P2P_GO) },
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			BIT(NL80211_IFTYPE_P2P_GO) |
 			BIT(NL80211_IFTYPE_ADHOC) |
-			BIT(NL80211_IFTYPE_MESH_POINT);
+			BIT(NL80211_IFTYPE_MESH_POINT) |
+			BIT(NL80211_IFTYPE_P2P_DEVICE);
 
 		hw->flags = IEEE80211_HW_MFP_CAPABLE |
 			    IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index e535c93..d273273 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -726,3 +726,29 @@
 
 	return count;
 }
+
+/*
+ * This function retrieves the Tx BA stream table entry for a specific RA
+ * and deletes it.
+ */
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
+{
+	struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
+	unsigned long flags;
+
+	if (!ra)
+		return;
+
+	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+	list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
+		if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+					       flags);
+			mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
+			spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
+	return;
+}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 28366e9..67c087c 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -69,6 +69,7 @@
 int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
 				int cmd_action,
 				struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
 
 /*
  * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +158,18 @@
 
 	return false;
 }
+
+/*
+ * This function checks whether the associated station is 11n enabled.
+ */
+static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
+					     struct mwifiex_sta_node *node)
+{
+
+	if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
+	    !priv->ap_11n_enabled)
+		return 0;
+
+	return node->is_11n_enabled;
+}
 #endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index ab84eb9..395f1bf 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -62,9 +62,7 @@
 	};
 	struct tx_packet_hdr *tx_header;
 
-	skb_put(skb_aggr, sizeof(*tx_header));
-
-	tx_header = (struct tx_packet_hdr *) skb_aggr->data;
+	tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
 
 	/* Copy DA and SA */
 	dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@
 	tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
 
 	/* Add payload */
-	skb_put(skb_aggr, skb_src->len);
-	memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
-	       skb_src->len);
-	*pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
-						      LLC_SNAP_LEN)) & 3)) : 0;
-	skb_put(skb_aggr, *pad);
+	memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+
+	/* Add padding for new MSDU to start from 4 byte boundary */
+	*pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
 
 	return skb_aggr->len + *pad;
 }
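The new padding rule above rounds the aggregate's tail pointer up to the next 4-byte boundary instead of deriving the pad from the source packet length. A quick standalone check of the expression (the tail addresses are made up):

#include <stdio.h>

int main(void)
{
	unsigned long tails[] = { 0x1000, 0x1001, 0x1002, 0x1003 };

	for (int i = 0; i < 4; i++) {
		int pad = (4 - (tails[i] & 0x3)) % 4;
		/* 0x1000 -> 0, 0x1001 -> 3, 0x1002 -> 2, 0x1003 -> 1 */
		printf("tail %#lx needs %d pad byte(s)\n", tails[i], pad);
	}
	return 0;
}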
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 591ccd3..24e2582 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -54,8 +54,13 @@
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
 		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-		if (rx_tmp_ptr)
-			mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+		if (rx_tmp_ptr) {
+			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+				mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+			else
+				mwifiex_process_rx_packet(priv->adapter,
+							  rx_tmp_ptr);
+		}
 	}
 
 	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
 		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-		mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+
+		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+			mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+		else
+			mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
 	}
 
 	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
  */
-static struct mwifiex_rx_reorder_tbl *
+struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@
 	return NULL;
 }
 
+/* This function retrieves the pointer to an entry in the Rx reordering
+ * table which matches the given TA and deletes it.
+ */
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+{
+	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
+	unsigned long flags;
+
+	if (!ta)
+		return;
+
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
+			mwifiex_del_rx_reorder_entry(priv, tbl);
+			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+	return;
+}
+
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
@@ -226,6 +260,7 @@
 	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
 	u16 last_seq = 0;
 	unsigned long flags;
+	struct mwifiex_sta_node *node;
 
 	/*
 	 * If we get a TID, ta pair which is already present dispatch all the
@@ -248,13 +283,19 @@
 	new_node->tid = tid;
 	memcpy(new_node->ta, ta, ETH_ALEN);
 	new_node->start_win = seq_num;
-	if (mwifiex_queuing_ra_based(priv))
-		/* TODO for adhoc */
+
+	if (mwifiex_queuing_ra_based(priv)) {
 		dev_dbg(priv->adapter->dev,
-			"info: ADHOC:last_seq=%d start_win=%d\n",
+			"info: AP/ADHOC:last_seq=%d start_win=%d\n",
 			last_seq, new_node->start_win);
-	else
+		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
+			node = mwifiex_get_sta_entry(priv, ta);
+			if (node)
+				last_seq = node->rx_seq[tid];
+		}
+	} else {
 		last_seq = priv->rx_seq[tid];
+	}
 
 	if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
 	    last_seq >= new_node->start_win)
@@ -396,8 +437,13 @@
 
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
-		if (pkt_type != PKT_TYPE_BAR)
-			mwifiex_process_rx_packet(priv->adapter, payload);
+		if (pkt_type != PKT_TYPE_BAR) {
+			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+				mwifiex_handle_uap_rx_forward(priv, payload);
+			else
+				mwifiex_process_rx_packet(priv->adapter,
+							  payload);
+		}
 		return 0;
 	}
 	start_win = tbl->start_win;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 6c9815a..7284859 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -38,6 +38,8 @@
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
 #define MWIFIEX_DEF_11N_RX_SEQ_NUM	0xffff
+#define BA_SETUP_MAX_PACKET_THRESHOLD	16
+#define BA_SETUP_PACKET_OFFSET		16
 
 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
 {
@@ -68,5 +70,8 @@
 							   mwifiex_private
 							   *priv, int tid,
 							   u8 *ta);
+struct mwifiex_rx_reorder_tbl *
+mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
 
 #endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 3f66ebb..dd0410d 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -33,8 +33,10 @@
 mwifiex-y += ie.o
 mwifiex-y += sta_cmdresp.o
 mwifiex-y += sta_event.o
+mwifiex-y += uap_event.o
 mwifiex-y += sta_tx.o
 mwifiex-y += sta_rx.o
+mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fe42137..e57f543 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -99,7 +99,7 @@
 	const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
-	if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
+	if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
 		wiphy_err(wiphy, "deleting the crypto keys\n");
 		return -EFAULT;
 	}
@@ -171,7 +171,8 @@
 
 	if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
 		priv->wep_key_curr_index = key_index;
-	} else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+	} else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
+				      NULL, 0)) {
 		wiphy_err(wiphy, "set default Tx key index\n");
 		return -EFAULT;
 	}
@@ -207,7 +208,7 @@
 		return 0;
 	}
 
-	if (mwifiex_set_encode(priv, params->key, params->key_len,
+	if (mwifiex_set_encode(priv, params, params->key, params->key_len,
 			       key_index, peer_mac, 0)) {
 		wiphy_err(wiphy, "crypto keys added\n");
 		return -EFAULT;
@@ -748,6 +749,7 @@
 	WLAN_CIPHER_SUITE_WEP104,
 	WLAN_CIPHER_SUITE_TKIP,
 	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
 };
 
 /*
@@ -906,6 +908,8 @@
 	if (mwifiex_del_mgmt_ies(priv))
 		wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
 
+	priv->ap_11n_enabled = 0;
+
 	if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
 				  HostCmd_ACT_GEN_SET, 0, NULL)) {
 		wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1159,7 +1163,7 @@
 	priv->wep_key_curr_index = 0;
 	priv->sec_info.encryption_mode = 0;
 	priv->sec_info.is_authtype_auto = 0;
-	ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
+	ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
 
 	if (mode == NL80211_IFTYPE_ADHOC) {
 		/* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1210,9 @@
 				"info: setting wep encryption"
 				" with key len %d\n", sme->key_len);
 			priv->wep_key_curr_index = sme->key_idx;
-			ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
-						 sme->key_idx, NULL, 0);
+			ret = mwifiex_set_encode(priv, NULL, sme->key,
+						 sme->key_len, sme->key_idx,
+						 NULL, 0);
 		}
 	}
 done:
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index c68adec..c229ddd 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -447,7 +447,10 @@
 			priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 	}
 
-	ret = mwifiex_process_sta_event(priv);
+	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+		ret = mwifiex_process_uap_event(priv);
+	else
+		ret = mwifiex_process_sta_event(priv);
 
 	adapter->event_cause = 0;
 	adapter->event_skb = NULL;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 070ef25..400d360 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -60,6 +60,9 @@
 #define MWIFIEX_SDIO_BLOCK_SIZE            256
 
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
+#define MWIFIEX_BUF_FLAG_BRIDGED_PKT	   BIT(1)
+
+#define MWIFIEX_BRIDGED_PKTS_THRESHOLD     1024
 
 enum mwifiex_bss_type {
 	MWIFIEX_BSS_TYPE_STA = 0,
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e831b44..ae06f31 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -65,10 +65,12 @@
 	KEY_TYPE_ID_TKIP,
 	KEY_TYPE_ID_AES,
 	KEY_TYPE_ID_WAPI,
+	KEY_TYPE_ID_AES_CMAC,
 };
 #define KEY_MCAST	BIT(0)
 #define KEY_UNICAST	BIT(1)
 #define KEY_ENABLED	BIT(2)
+#define KEY_IGTK	BIT(10)
 
 #define WAPI_KEY_LEN			50
 
@@ -424,10 +426,10 @@
 struct rxpd {
 	u8 bss_type;
 	u8 bss_num;
-	u16 rx_pkt_length;
-	u16 rx_pkt_offset;
-	u16 rx_pkt_type;
-	u16 seq_num;
+	__le16 rx_pkt_length;
+	__le16 rx_pkt_offset;
+	__le16 rx_pkt_type;
+	__le16 seq_num;
 	u8 priority;
 	u8 rx_rate;
 	s8 snr;
@@ -439,6 +441,31 @@
 	u8 reserved;
 } __packed;
 
+struct uap_txpd {
+	u8 bss_type;
+	u8 bss_num;
+	__le16 tx_pkt_length;
+	__le16 tx_pkt_offset;
+	__le16 tx_pkt_type;
+	__le32 tx_control;
+	u8 priority;
+	u8 flags;
+	u8 pkt_delay_2ms;
+	u8 reserved1;
+	__le32 reserved2;
+};
+
+struct uap_rxpd {
+	u8 bss_type;
+	u8 bss_num;
+	__le16 rx_pkt_length;
+	__le16 rx_pkt_offset;
+	__le16 rx_pkt_type;
+	__le16 seq_num;
+	u8 priority;
+	u8 reserved1;
+};
+
 enum mwifiex_chan_scan_mode_bitmasks {
 	MWIFIEX_PASSIVE_SCAN = BIT(0),
 	MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +585,13 @@
 	u8 key[50];
 } __packed;
 
+#define IGTK_PN_LEN		8
+
+struct mwifiex_cmac_param {
+	u8 ipn[IGTK_PN_LEN];
+	u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
 struct host_cmd_ds_802_11_key_material {
 	__le16 action;
 	struct mwifiex_ie_type_key_param_set key_param_set;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 21fdc6c..fad2c8d2b 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -64,60 +64,72 @@
 	struct cmd_ctrl_node *cmd_node, *tmp_node;
 	unsigned long flags;
 
-	if (!mwifiex_wmm_lists_empty(adapter)) {
-		if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+	if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+		/*
+		 * Abort scan operation by cancelling all pending scan
+		 * commands
+		 */
+		spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+		list_for_each_entry_safe(cmd_node, tmp_node,
+					 &adapter->scan_pending_q, list) {
+			list_del(&cmd_node->list);
+			cmd_node->wait_q_enabled = false;
+			mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+		}
+		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+		adapter->scan_processing = false;
+		adapter->scan_delay_cnt = 0;
+		adapter->empty_tx_q_cnt = 0;
+		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+		if (priv->user_scan_cfg) {
+			dev_dbg(priv->adapter->dev,
+				"info: %s: scan aborted\n", __func__);
+			cfg80211_scan_done(priv->scan_request, 1);
+			priv->scan_request = NULL;
+			kfree(priv->user_scan_cfg);
+			priv->user_scan_cfg = NULL;
+		}
+		goto done;
+	}
+
+	if (!atomic_read(&priv->adapter->is_tx_received)) {
+		adapter->empty_tx_q_cnt++;
+		if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
 			/*
-			 * Abort scan operation by cancelling all pending scan
-			 * command
+			 * No Tx traffic for 200msec. Get scan command from
+			 * scan pending queue and put to cmd pending queue to
+			 * resume scan operation
 			 */
+			adapter->scan_delay_cnt = 0;
+			adapter->empty_tx_q_cnt = 0;
 			spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-			list_for_each_entry_safe(cmd_node, tmp_node,
-						 &adapter->scan_pending_q,
-						 list) {
-				list_del(&cmd_node->list);
-				cmd_node->wait_q_enabled = false;
-				mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-			}
+			cmd_node = list_first_entry(&adapter->scan_pending_q,
+						    struct cmd_ctrl_node, list);
+			list_del(&cmd_node->list);
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
 
-			spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-			adapter->scan_processing = false;
-			spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
-					       flags);
-
-			if (priv->user_scan_cfg) {
-				dev_dbg(priv->adapter->dev,
-					"info: %s: scan aborted\n", __func__);
-				cfg80211_scan_done(priv->scan_request, 1);
-				priv->scan_request = NULL;
-				kfree(priv->user_scan_cfg);
-				priv->user_scan_cfg = NULL;
-			}
-		} else {
-			/*
-			 * Tx data queue is still not empty, delay scan
-			 * operation further by 20msec.
-			 */
-			mod_timer(&priv->scan_delay_timer, jiffies +
-				  msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-			adapter->scan_delay_cnt++;
+			mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+							true);
+			goto done;
 		}
-		queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
 	} else {
-		/*
-		 * Tx data queue is empty. Get scan command from scan_pending_q
-		 * and put to cmd_pending_q to resume scan operation
-		 */
-		adapter->scan_delay_cnt = 0;
-		spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-		cmd_node = list_first_entry(&adapter->scan_pending_q,
-					    struct cmd_ctrl_node, list);
-		list_del(&cmd_node->list);
-		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
-		mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+		adapter->empty_tx_q_cnt = 0;
 	}
+
+	/* Delay scan operation further by 20msec */
+	mod_timer(&priv->scan_delay_timer, jiffies +
+		  msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+	adapter->scan_delay_cnt++;
+
+done:
+	if (atomic_read(&priv->adapter->is_tx_received))
+		atomic_set(&priv->adapter->is_tx_received, false);
+
+	return;
 }
 
 /*
@@ -196,6 +208,7 @@
 	priv->curr_bcn_size = 0;
 	priv->wps_ie = NULL;
 	priv->wps_ie_len = 0;
+	priv->ap_11n_enabled = 0;
 
 	priv->scan_block = false;
 
@@ -345,6 +358,7 @@
 	memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
 	adapter->arp_filter_size = 0;
 	adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+	adapter->empty_tx_q_cnt = 0;
 }
 
 /*
@@ -410,6 +424,7 @@
 				list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
 			list_del(&priv->tx_ba_stream_tbl_ptr);
 			list_del(&priv->rx_reorder_tbl_ptr);
+			list_del(&priv->sta_list);
 		}
 	}
 }
@@ -472,6 +487,7 @@
 			spin_lock_init(&priv->rx_pkt_lock);
 			spin_lock_init(&priv->wmm.ra_list_spinlock);
 			spin_lock_init(&priv->curr_bcn_buf_lock);
+			spin_lock_init(&priv->sta_list_spinlock);
 		}
 	}
 
@@ -504,6 +520,7 @@
 		}
 		INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
 		INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+		INIT_LIST_HEAD(&priv->sta_list);
 
 		spin_lock_init(&priv->tx_ba_stream_tbl_lock);
 		spin_lock_init(&priv->rx_reorder_tbl_lock);
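The reworked scan-delay worker earlier in this file's diff now makes a three-way decision on every 20 ms tick: abort once scan_delay_cnt reaches MWIFIEX_MAX_SCAN_DELAY_CNT, resume the deferred scan after MWIFIEX_MAX_EMPTY_TX_Q_CNT consecutive ticks without Tx traffic (about 200 ms), or re-arm the timer. A compact standalone model of just that decision logic; the constants mirror the driver's, while the queue handling, locking and cfg80211 notification are deliberately left out:

#include <stdio.h>

#define MAX_SCAN_DELAY_CNT	50	/* ~1 s of 20 ms ticks */
#define MAX_EMPTY_TX_Q_CNT	10	/* ~200 ms without Tx traffic */

enum action { ABORT_SCAN, RESUME_SCAN, DELAY_AGAIN };

static enum action scan_delay_tick(int *delay_cnt, int *empty_cnt, int tx_seen)
{
	if (*delay_cnt == MAX_SCAN_DELAY_CNT)
		return ABORT_SCAN;		/* give up, flush pending scan cmds */

	if (!tx_seen) {
		if (++(*empty_cnt) == MAX_EMPTY_TX_Q_CNT)
			return RESUME_SCAN;	/* queue quiet long enough */
	} else {
		*empty_cnt = 0;			/* traffic seen, start counting again */
	}

	(*delay_cnt)++;
	return DELAY_AGAIN;			/* re-arm the 20 ms timer */
}

int main(void)
{
	int delay = 0, empty = 0;
	enum action a = DELAY_AGAIN;

	/* Ten quiet ticks in a row are enough to resume the scan. */
	while (a == DELAY_AGAIN)
		a = scan_delay_tick(&delay, &empty, 0);

	printf("%s after %d quiet ticks\n",
	       a == RESUME_SCAN ? "resume" : "abort", empty);
	return 0;
}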
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 5019153..6a5eded 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -213,7 +213,7 @@
 };
 
 #define MWIFIEX_KEY_INDEX_UNICAST	0x40000000
-#define WAPI_RXPN_LEN			16
+#define PN_LEN				16
 
 struct mwifiex_ds_encrypt_key {
 	u32 key_disable;
@@ -222,7 +222,8 @@
 	u8 key_material[WLAN_MAX_KEY_LEN];
 	u8 mac_addr[ETH_ALEN];
 	u32 is_wapi_key;
-	u8 wapi_rxpn[WAPI_RXPN_LEN];
+	u8 pn[PN_LEN];		/* packet number */
+	u8 is_igtk_key;
 };
 
 struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4680362..cb11552 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -520,6 +520,9 @@
 	mwifiex_wmm_add_buf_txqueue(priv, skb);
 	atomic_inc(&priv->adapter->tx_pending);
 
+	if (priv->adapter->scan_delay_cnt)
+		atomic_set(&priv->adapter->is_tx_received, true);
+
 	if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
 		mwifiex_set_trans_start(dev);
 		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e7c2a82..994bc4f 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -88,6 +88,7 @@
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME	(MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
 #define MWIFIEX_MAX_SCAN_DELAY_CNT			50
+#define MWIFIEX_MAX_EMPTY_TX_Q_CNT			10
 #define MWIFIEX_SCAN_DELAY_MSEC				20
 
 #define RSN_GTK_OUI_OFFSET				2
@@ -199,6 +200,9 @@
 	u8 ra[ETH_ALEN];
 	u32 total_pkts_size;
 	u32 is_11n_enabled;
+	u16 max_amsdu;
+	u16 pkt_count;
+	u8 ba_packet_thr;
 };
 
 struct mwifiex_tid_tbl {
@@ -431,6 +435,9 @@
 	u8 wmm_enabled;
 	u8 wmm_qosinfo;
 	struct mwifiex_wmm_desc wmm;
+	struct list_head sta_list;
+	/* spin lock for associated station list */
+	spinlock_t sta_list_spinlock;
 	struct list_head tx_ba_stream_tbl_ptr;
 	/* spin lock for tx_ba_stream_tbl_ptr queue */
 	spinlock_t tx_ba_stream_tbl_lock;
@@ -486,6 +493,7 @@
 	u16 assocresp_idx;
 	u16 rsn_idx;
 	struct timer_list scan_delay_timer;
+	u8 ap_11n_enabled;
 };
 
 enum mwifiex_ba_status {
@@ -550,6 +558,19 @@
 	u64 fw_tsf;
 };
 
+/* This is an AP-specific structure which stores information
+ * about an associated STA.
+ */
+struct mwifiex_sta_node {
+	struct list_head list;
+	u8 mac_addr[ETH_ALEN];
+	u8 is_wmm_enabled;
+	u8 is_11n_enabled;
+	u8 ampdu_sta[MAX_NUM_TID];
+	u16 rx_seq[MAX_NUM_TID];
+	u16 max_amsdu;
+};
+
 struct mwifiex_if_ops {
 	int (*init_if) (struct mwifiex_adapter *);
 	void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +711,9 @@
 	u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
 	u16 max_mgmt_ie_index;
 	u8 scan_delay_cnt;
+	u8 empty_tx_q_cnt;
+	atomic_t is_tx_received;
+	atomic_t pending_bridged_pkts;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -780,7 +804,15 @@
 				struct host_cmd_ds_command *resp);
 int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
 				  struct sk_buff *skb);
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+				  struct sk_buff *skb);
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+				  struct sk_buff *skb);
 int mwifiex_process_sta_event(struct mwifiex_private *);
+int mwifiex_process_uap_event(struct mwifiex_private *);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
 void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
 int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
 int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
@@ -949,9 +981,9 @@
 			  const struct mwifiex_user_scan_cfg *user_scan_in);
 int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
 
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-		       int key_len, u8 key_index, const u8 *mac_addr,
-		       int disable);
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+		       const u8 *key, int key_len, u8 key_index,
+		       const u8 *mac_addr, int disable);
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
 
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 04dc7ca..215d07e6 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -989,6 +989,8 @@
 			*max_chan_per_scan = 2;
 		else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
 			*max_chan_per_scan = 3;
+		else
+			*max_chan_per_scan = 4;
 	}
 }
 
@@ -1433,9 +1435,9 @@
 			if (ret)
 				dev_err(priv->adapter->dev, "cannot find ssid "
 					"%s\n", bss_desc->ssid.ssid);
-				break;
+			break;
 		default:
-				ret = 0;
+			ret = 0;
 		}
 	}
 
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index df3a33c..0cc3406 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -610,7 +610,7 @@
 		memcpy(&key_material->key_param_set.key[2],
 		       enc_key->key_material, enc_key->key_len);
 		memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
-		       enc_key->wapi_rxpn, WAPI_RXPN_LEN);
+		       enc_key->pn, PN_LEN);
 		key_material->key_param_set.length =
 			cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
 
@@ -621,23 +621,38 @@
 		return ret;
 	}
 	if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
-		dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
-		key_material->key_param_set.key_type_id =
-						cpu_to_le16(KEY_TYPE_ID_AES);
-		if (cmd_oid == KEY_INFO_ENABLED)
-			key_material->key_param_set.key_info =
+		if (enc_key->is_igtk_key) {
+			dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+			key_material->key_param_set.key_type_id =
+					cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
+			if (cmd_oid == KEY_INFO_ENABLED)
+				key_material->key_param_set.key_info =
 						cpu_to_le16(KEY_ENABLED);
-		else
-			key_material->key_param_set.key_info =
+			else
+				key_material->key_param_set.key_info =
 						cpu_to_le16(!KEY_ENABLED);
 
-		if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+			key_material->key_param_set.key_info |=
+							cpu_to_le16(KEY_IGTK);
+		} else {
+			dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+			key_material->key_param_set.key_type_id =
+						cpu_to_le16(KEY_TYPE_ID_AES);
+			if (cmd_oid == KEY_INFO_ENABLED)
+				key_material->key_param_set.key_info =
+						cpu_to_le16(KEY_ENABLED);
+			else
+				key_material->key_param_set.key_info =
+						cpu_to_le16(!KEY_ENABLED);
+
+			if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
 				/* AES pairwise key: unicast */
-			key_material->key_param_set.key_info |=
+				key_material->key_param_set.key_info |=
 						cpu_to_le16(KEY_UNICAST);
-		else		/* AES group key: multicast */
-			key_material->key_param_set.key_info |=
+			else	/* AES group key: multicast */
+				key_material->key_param_set.key_info |=
 							cpu_to_le16(KEY_MCAST);
+		}
 	} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
 		dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
 		key_material->key_param_set.key_type_id =
@@ -668,6 +683,24 @@
 		key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
 				+ sizeof(struct mwifiex_ie_types_header);
 
+		if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
+							KEY_TYPE_ID_AES_CMAC) {
+			struct mwifiex_cmac_param *param =
+					(void *)key_material->key_param_set.key;
+
+			memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
+			memcpy(param->key, enc_key->key_material,
+			       WLAN_KEY_LEN_AES_CMAC);
+
+			key_param_len = sizeof(struct mwifiex_cmac_param);
+			key_material->key_param_set.key_len =
+						cpu_to_le16(key_param_len);
+			key_param_len += KEYPARAMSET_FIXED_LEN;
+			key_material->key_param_set.length =
+						cpu_to_le16(key_param_len);
+			key_param_len += sizeof(struct mwifiex_ie_types_header);
+		}
+
 		cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
 					+ key_param_len);
 
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8614a8..dff51d5 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,10 +184,9 @@
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
-	int len, ret = 0;
+	int ret = 0;
 	u32 eventcause = adapter->event_cause;
-	struct station_info sinfo;
-	struct mwifiex_assoc_event *event;
+	u16 ctrl;
 
 	switch (eventcause) {
 	case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@
 
 	case EVENT_MIC_ERR_UNICAST:
 		dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+		cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+					     NL80211_KEYTYPE_PAIRWISE,
+					     -1, NULL, GFP_KERNEL);
 		break;
 
 	case EVENT_MIC_ERR_MULTICAST:
 		dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+		cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+					     NL80211_KEYTYPE_GROUP,
+					     -1, NULL, GFP_KERNEL);
 		break;
 	case EVENT_MIB_CHANGED:
 	case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@
 					      adapter->event_body);
 		break;
 	case EVENT_AMSDU_AGGR_CTRL:
-		dev_dbg(adapter->dev, "event:  AMSDU_AGGR_CTRL %d\n",
-			*(u16 *) adapter->event_body);
+		ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+		dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
 		adapter->tx_buf_size =
-			min(adapter->curr_tx_buf_size,
-			    le16_to_cpu(*(__le16 *) adapter->event_body));
+				min_t(u16, adapter->curr_tx_buf_size, ctrl);
 		dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
 			adapter->tx_buf_size);
 		break;
@@ -405,51 +410,6 @@
 		dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
 		break;
 
-	case EVENT_UAP_STA_ASSOC:
-		memset(&sinfo, 0, sizeof(sinfo));
-		event = (struct mwifiex_assoc_event *)
-			(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-		if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
-			len = -1;
-
-			if (ieee80211_is_assoc_req(event->frame_control))
-				len = 0;
-			else if (ieee80211_is_reassoc_req(event->frame_control))
-				/* There will be ETH_ALEN bytes of
-				 * current_ap_addr before the re-assoc ies.
-				 */
-				len = ETH_ALEN;
-
-			if (len != -1) {
-				sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
-				sinfo.assoc_req_ies = &event->data[len];
-				len = (u8 *)sinfo.assoc_req_ies -
-				      (u8 *)&event->frame_control;
-				sinfo.assoc_req_ies_len =
-					le16_to_cpu(event->len) - (u16)len;
-			}
-		}
-		cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
-				 GFP_KERNEL);
-		break;
-	case EVENT_UAP_STA_DEAUTH:
-		cfg80211_del_sta(priv->netdev, adapter->event_body +
-				 MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
-		break;
-	case EVENT_UAP_BSS_IDLE:
-		priv->media_connected = false;
-		break;
-	case EVENT_UAP_BSS_ACTIVE:
-		priv->media_connected = true;
-		break;
-	case EVENT_UAP_BSS_START:
-		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-		memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
-		break;
-	case EVENT_UAP_MIC_COUNTERMEASURES:
-		/* For future development */
-		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-		break;
 	default:
 		dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
 			eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index fb21360..3f02597 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -942,20 +942,26 @@
  * This function allocates the IOCTL request buffer, fills it
  * with requisite parameters and calls the IOCTL handler.
  */
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-			int key_len, u8 key_index,
-			const u8 *mac_addr, int disable)
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+		       const u8 *key, int key_len, u8 key_index,
+		       const u8 *mac_addr, int disable)
 {
 	struct mwifiex_ds_encrypt_key encrypt_key;
 
 	memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
 	encrypt_key.key_len = key_len;
+
+	if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+		encrypt_key.is_igtk_key = true;
+
 	if (!disable) {
 		encrypt_key.key_index = key_index;
 		if (key_len)
 			memcpy(encrypt_key.key_material, key, key_len);
 		if (mac_addr)
 			memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
+		if (kp && kp->seq && kp->seq_len)
+			memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
 	} else {
 		encrypt_key.key_disable = true;
 		if (mac_addr)
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 02ce3b7..d91d5c0 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -54,8 +54,8 @@
 
 	local_rx_pd = (struct rxpd *) (skb->data);
 
-	rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-				local_rx_pd->rx_pkt_offset);
+	rx_pkt_hdr = (void *)local_rx_pd +
+		     le16_to_cpu(local_rx_pd->rx_pkt_offset);
 
 	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
 		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@
 	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
 	struct rx_packet_hdr *rx_pkt_hdr;
 	u8 ta[ETH_ALEN];
-	u16 rx_pkt_type;
+	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
 	struct mwifiex_private *priv =
 			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
 					       rx_info->bss_type);
@@ -134,16 +134,17 @@
 		return -1;
 
 	local_rx_pd = (struct rxpd *) (skb->data);
-	rx_pkt_type = local_rx_pd->rx_pkt_type;
+	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
+	rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+	rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
+	seq_num = le16_to_cpu(local_rx_pd->seq_num);
 
-	rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-					local_rx_pd->rx_pkt_offset);
+	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
-	if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
-	    (u16) skb->len) {
-		dev_err(adapter->dev, "wrong rx packet: len=%d,"
-			" rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
-		       local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
+	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+		dev_err(adapter->dev,
+			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+			skb->len, rx_pkt_offset, rx_pkt_length);
 		priv->stats.rx_dropped++;
 
 		if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@
 		return ret;
 	}
 
-	if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
+	if (rx_pkt_type == PKT_TYPE_AMSDU) {
 		struct sk_buff_head list;
 		struct sk_buff *rx_skb;
 
 		__skb_queue_head_init(&list);
 
-		skb_pull(skb, local_rx_pd->rx_pkt_offset);
-		skb_trim(skb, local_rx_pd->rx_pkt_length);
+		skb_pull(skb, rx_pkt_offset);
+		skb_trim(skb, rx_pkt_length);
 
 		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
 					 priv->wdev->iftype, 0, false);
@@ -189,17 +190,14 @@
 		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
 	} else {
 		if (rx_pkt_type != PKT_TYPE_BAR)
-			priv->rx_seq[local_rx_pd->priority] =
-						local_rx_pd->seq_num;
+			priv->rx_seq[local_rx_pd->priority] = seq_num;
 		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
 		       ETH_ALEN);
 	}
 
 	/* Reorder and send to OS */
-	ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
-					     local_rx_pd->priority, ta,
-					     (u8) local_rx_pd->rx_pkt_type,
-					     skb);
+	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
+					 ta, (u8) rx_pkt_type, skb);
 
 	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
 		if (adapter->if_ops.data_complete)
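
The sta_rx.c conversions above matter because the firmware writes the rxpd descriptor fields little-endian; reading them through le16_to_cpu() keeps the driver correct on big-endian hosts. A small standalone sketch of the same idea, with a hand-rolled helper standing in for the kernel's le16_to_cpu() and made-up descriptor values:

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian u16 from a byte buffer, independent of host order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* toy rxpd fragment: rx_pkt_offset = 0x0040, rx_pkt_length = 0x05dc,
	 * both stored least-significant byte first by the firmware */
	const uint8_t rxpd[] = { 0x40, 0x00, 0xdc, 0x05 };
	uint16_t off = get_le16(&rxpd[0]);
	uint16_t len = get_le16(&rxpd[2]);
	uint16_t skb_len = 1600;

	/* same sanity check the driver performs before touching the payload */
	if ((uint32_t)off + len > skb_len)
		printf("wrong rx packet: offset=%u length=%u\n", off, len);
	else
		printf("ok: offset=%u length=%u (fits in %u)\n", off, len, skb_len);
	return 0;
}
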
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index cecb272..985073d 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -51,6 +51,9 @@
 	rx_info->bss_num = priv->bss_num;
 	rx_info->bss_type = priv->bss_type;
 
+	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+		return mwifiex_process_uap_rx_packet(adapter, skb);
+
 	return mwifiex_process_sta_rx_packet(adapter, skb);
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -157,6 +160,8 @@
 		priv->stats.tx_errors++;
 	}
 
+	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+		atomic_dec_return(&adapter->pending_bridged_pkts);
 	if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
 		goto done;
 
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f40e93fe..c10aac0 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,6 +167,7 @@
 	if (ht_ie) {
 		memcpy(&bss_cfg->ht_cap, ht_ie + 2,
 		       sizeof(struct ieee80211_ht_cap));
+		priv->ap_11n_enabled = 1;
 	} else {
 		memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
 		bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644
index 0000000..a33fa39
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -0,0 +1,290 @@
+/*
+ * Marvell Wireless LAN device driver: AP event handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "main.h"
+#include "11n.h"
+
+/*
+ * This function returns the pointer to the station entry in the station list
+ * table which matches the specified MAC address.
+ * This function should be called after acquiring the RA list spinlock.
+ * NULL is returned if no matching entry is found in the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+	struct mwifiex_sta_node *node;
+
+	if (!mac)
+		return NULL;
+
+	list_for_each_entry(node, &priv->sta_list, list) {
+		if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+			return node;
+	}
+
+	return NULL;
+}
+
+/*
+ * This function will add a sta_node entry to associated station list
+ * table with the given mac address.
+ * If the entry already exists, the existing entry is returned.
+ * If the received MAC address is NULL, NULL is returned.
+ */
+static struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+	struct mwifiex_sta_node *node;
+	unsigned long flags;
+
+	if (!mac)
+		return NULL;
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	node = mwifiex_get_sta_entry(priv, mac);
+	if (node)
+		goto done;
+
+	node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
+	if (!node)
+		goto done;
+
+	memcpy(node->mac_addr, mac, ETH_ALEN);
+	list_add_tail(&node->list, &priv->sta_list);
+
+done:
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+	return node;
+}
+
+/*
+ * This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ */
+static void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+		       int ies_len, struct mwifiex_sta_node *node)
+{
+	const struct ieee80211_ht_cap *ht_cap;
+
+	if (!ies)
+		return;
+
+	ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+	if (ht_cap) {
+		node->is_11n_enabled = 1;
+		node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+				  IEEE80211_HT_CAP_MAX_AMSDU ?
+				  MWIFIEX_TX_DATA_BUF_SIZE_8K :
+				  MWIFIEX_TX_DATA_BUF_SIZE_4K;
+	} else {
+		node->is_11n_enabled = 0;
+	}
+
+	return;
+}
+
+/*
+ * This function will delete a station entry from the station list.
+ */
+static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+	struct mwifiex_sta_node *node, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+	node = mwifiex_get_sta_entry(priv, mac);
+	if (node) {
+		list_for_each_entry_safe(node, tmp, &priv->sta_list,
+					 list) {
+			list_del(&node->list);
+			kfree(node);
+		}
+	}
+
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+	return;
+}
+
+/*
+ * This function will delete all stations from associated station list.
+ */
+static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+	struct mwifiex_sta_node *node, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+	list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	INIT_LIST_HEAD(&priv->sta_list);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+	return;
+}
+
+/*
+ * This function handles AP interface specific events generated by firmware.
+ *
+ * Event specific routines are called by this function based
+ * upon the generated event cause.
+ *
+ *
+ * Events supported for AP -
+ *      - EVENT_UAP_STA_ASSOC
+ *      - EVENT_UAP_STA_DEAUTH
+ *      - EVENT_UAP_BSS_ACTIVE
+ *      - EVENT_UAP_BSS_START
+ *      - EVENT_UAP_BSS_IDLE
+ *      - EVENT_UAP_MIC_COUNTERMEASURES
+ */
+int mwifiex_process_uap_event(struct mwifiex_private *priv)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	int len, i;
+	u32 eventcause = adapter->event_cause;
+	struct station_info sinfo;
+	struct mwifiex_assoc_event *event;
+	struct mwifiex_sta_node *node;
+	u8 *deauth_mac;
+	struct host_cmd_ds_11n_batimeout *ba_timeout;
+	u16 ctrl;
+
+	switch (eventcause) {
+	case EVENT_UAP_STA_ASSOC:
+		memset(&sinfo, 0, sizeof(sinfo));
+		event = (struct mwifiex_assoc_event *)
+			(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+		if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+			len = -1;
+
+			if (ieee80211_is_assoc_req(event->frame_control))
+				len = 0;
+			else if (ieee80211_is_reassoc_req(event->frame_control))
+				/* There will be ETH_ALEN bytes of
+				 * current_ap_addr before the re-assoc ies.
+				 */
+				len = ETH_ALEN;
+
+			if (len != -1) {
+				sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+				sinfo.assoc_req_ies = &event->data[len];
+				len = (u8 *)sinfo.assoc_req_ies -
+				      (u8 *)&event->frame_control;
+				sinfo.assoc_req_ies_len =
+					le16_to_cpu(event->len) - (u16)len;
+			}
+		}
+		cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+				 GFP_KERNEL);
+
+		node = mwifiex_add_sta_entry(priv, event->sta_addr);
+		if (!node) {
+			dev_warn(adapter->dev,
+				 "could not create station entry!\n");
+			return -1;
+		}
+
+		if (!priv->ap_11n_enabled)
+			break;
+
+		mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
+				       sinfo.assoc_req_ies_len, node);
+
+		for (i = 0; i < MAX_NUM_TID; i++) {
+			if (node->is_11n_enabled)
+				node->ampdu_sta[i] =
+					      priv->aggr_prio_tbl[i].ampdu_user;
+			else
+				node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+		}
+		memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+		break;
+	case EVENT_UAP_STA_DEAUTH:
+		deauth_mac = adapter->event_body +
+			     MWIFIEX_UAP_EVENT_EXTRA_HEADER;
+		cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
+
+		if (priv->ap_11n_enabled) {
+			mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
+			mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
+		}
+		mwifiex_del_sta_entry(priv, deauth_mac);
+		break;
+	case EVENT_UAP_BSS_IDLE:
+		priv->media_connected = false;
+		mwifiex_clean_txrx(priv);
+		mwifiex_del_all_sta_list(priv);
+		break;
+	case EVENT_UAP_BSS_ACTIVE:
+		priv->media_connected = true;
+		break;
+	case EVENT_UAP_BSS_START:
+		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+		memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
+		       ETH_ALEN);
+		break;
+	case EVENT_UAP_MIC_COUNTERMEASURES:
+		/* For future development */
+		dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+		break;
+	case EVENT_AMSDU_AGGR_CTRL:
+		ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+		dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
+		if (priv->media_connected) {
+			adapter->tx_buf_size =
+				min_t(u16, adapter->curr_tx_buf_size, ctrl);
+			dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
+				adapter->tx_buf_size);
+		}
+		break;
+	case EVENT_ADDBA:
+		dev_dbg(adapter->dev, "event: ADDBA Request\n");
+		if (priv->media_connected)
+			mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
+					       HostCmd_ACT_GEN_SET, 0,
+					       adapter->event_body);
+		break;
+	case EVENT_DELBA:
+		dev_dbg(adapter->dev, "event: DELBA Request\n");
+		if (priv->media_connected)
+			mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
+		break;
+	case EVENT_BA_STREAM_TIEMOUT:
+		dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+		if (priv->media_connected) {
+			ba_timeout = (void *)adapter->event_body;
+			mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
+		}
+		break;
+	default:
+		dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
+			eventcause);
+		break;
+	}
+
+	return 0;
+}
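
mwifiex_set_sta_ht_cap() above derives the per-station A-MSDU limit from the HT Capabilities element carried in the (re)association request IEs. A standalone sketch of that lookup, assuming the standard element ID 45 (WLAN_EID_HT_CAPABILITY) and the 0x0800 max-A-MSDU bit (IEEE80211_HT_CAP_MAX_AMSDU) in cap_info; the driver maps the result onto its 8K/4K TX buffer sizes, while this sketch prints the raw 802.11 limits 7935/3839 instead:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EID_HT_CAPABILITY   45      /* WLAN_EID_HT_CAPABILITY */
#define HT_CAP_MAX_AMSDU    0x0800  /* IEEE80211_HT_CAP_MAX_AMSDU */

/* Walk an IE buffer (id, len, data, ...) and return the max A-MSDU in bytes,
 * defaulting to 3839 when no HT Capabilities element is present. */
static int max_amsdu_from_ies(const uint8_t *ies, size_t len)
{
	size_t i = 0;

	while (i + 2 <= len) {
		uint8_t id = ies[i], elen = ies[i + 1];

		if (i + 2 + elen > len)
			break;                          /* truncated element */
		if (id == EID_HT_CAPABILITY && elen >= 2) {
			uint16_t cap = ies[i + 2] | (ies[i + 3] << 8);
			return (cap & HT_CAP_MAX_AMSDU) ? 7935 : 3839;
		}
		i += 2 + elen;
	}
	return 3839;
}

int main(void)
{
	/* HT Capabilities element with only the max-A-MSDU bit set in cap_info */
	const uint8_t ies[] = { 45, 26, 0x00, 0x08,
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	printf("max A-MSDU: %d bytes\n", max_amsdu_from_ies(ies, sizeof(ies)));
	return 0;
}
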
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644
index 0000000..6d814f0
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -0,0 +1,255 @@
+/*
+ * Marvell Wireless LAN device driver: AP TX and RX data handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n_aggr.h"
+#include "11n_rxreorder.h"
+
+static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+					 struct sk_buff *skb)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct uap_rxpd *uap_rx_pd;
+	struct rx_packet_hdr *rx_pkt_hdr;
+	struct sk_buff *new_skb;
+	struct mwifiex_txinfo *tx_info;
+	int hdr_chop;
+	struct timeval tv;
+	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+	uap_rx_pd = (struct uap_rxpd *)(skb->data);
+	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+	if ((atomic_read(&adapter->pending_bridged_pkts) >=
+					     MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+		dev_err(priv->adapter->dev,
+			"Tx: Bridge packet limit reached. Drop packet!\n");
+		kfree_skb(skb);
+		return;
+	}
+
+	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
+		/* Chop off the rxpd + the excess memory from
+		 * 802.2/llc/snap header that was removed.
+		 */
+		hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
+	else
+		/* Chop off the rxpd */
+		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+
+	/* Chop off the leading header bytes so that it points
+	 * to the start of either the reconstructed EthII frame
+	 * or the 802.2/llc/snap frame.
+	 */
+	skb_pull(skb, hdr_chop);
+
+	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
+		dev_dbg(priv->adapter->dev,
+			"data: Tx: insufficient skb headroom %d\n",
+			skb_headroom(skb));
+		/* Insufficient skb headroom - allocate a new skb */
+		new_skb =
+			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+		if (unlikely(!new_skb)) {
+			dev_err(priv->adapter->dev,
+				"Tx: cannot allocate new_skb\n");
+			kfree_skb(skb);
+			priv->stats.tx_dropped++;
+			return;
+		}
+
+		kfree_skb(skb);
+		skb = new_skb;
+		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
+			skb_headroom(skb));
+	}
+
+	tx_info = MWIFIEX_SKB_TXCB(skb);
+	tx_info->bss_num = priv->bss_num;
+	tx_info->bss_type = priv->bss_type;
+	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+
+	do_gettimeofday(&tv);
+	skb->tstamp = timeval_to_ktime(tv);
+	mwifiex_wmm_add_buf_txqueue(priv, skb);
+	atomic_inc(&adapter->tx_pending);
+	atomic_inc(&adapter->pending_bridged_pkts);
+
+	if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
+		mwifiex_set_trans_start(priv->netdev);
+		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+	}
+	return;
+}
+
+/*
+ * This function contains logic for AP packet forwarding.
+ *
+ * If a packet is multicast/broadcast, it is sent to kernel/upper layer
+ * as well as queued back to AP TX queue so that it can be sent to other
+ * associated stations.
+ * If a packet is unicast and RA is present in associated station list,
+ * it is again requeued into AP TX queue.
+ * If a packet is unicast and RA is not in associated station list,
+ * packet is forwarded to kernel to handle routing logic.
+ */
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+				  struct sk_buff *skb)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct uap_rxpd *uap_rx_pd;
+	struct rx_packet_hdr *rx_pkt_hdr;
+	u8 ra[ETH_ALEN];
+	struct sk_buff *skb_uap;
+
+	uap_rx_pd = (struct uap_rxpd *)(skb->data);
+	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+	/* don't do packet forwarding in disconnected state */
+	if (!priv->media_connected) {
+		dev_err(adapter->dev, "drop packet in disconnected state.\n");
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
+
+	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
+
+	if (is_multicast_ether_addr(ra)) {
+		skb_uap = skb_copy(skb, GFP_ATOMIC);
+		mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+	} else {
+		if (mwifiex_get_sta_entry(priv, ra)) {
+			/* Requeue Intra-BSS packet */
+			mwifiex_uap_queue_bridged_pkt(priv, skb);
+			return 0;
+		}
+	}
+
+	/* Forward unicast/inter-BSS packets to the kernel. */
+	return mwifiex_process_rx_packet(adapter, skb);
+}
+
+/*
+ * This function processes the packet received on AP interface.
+ *
+ * The function looks into the RxPD and performs sanity tests on the
+ * received buffer to ensure it is a valid packet before processing it
+ * further. If the packet is determined to be aggregated, it is
+ * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
+ *
+ * The completion callback is called after processing is complete.
+ */
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+				  struct sk_buff *skb)
+{
+	int ret;
+	struct uap_rxpd *uap_rx_pd;
+	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+	struct rx_packet_hdr *rx_pkt_hdr;
+	u16 rx_pkt_type;
+	u8 ta[ETH_ALEN], pkt_type;
+	struct mwifiex_sta_node *node;
+
+	struct mwifiex_private *priv =
+			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+					       rx_info->bss_type);
+
+	if (!priv)
+		return -1;
+
+	uap_rx_pd = (struct uap_rxpd *)(skb->data);
+	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
+		dev_err(adapter->dev,
+			"wrong rx packet: len=%d, offset=%d, length=%d\n",
+			skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+			le16_to_cpu(uap_rx_pd->rx_pkt_length));
+		priv->stats.rx_dropped++;
+
+		if (adapter->if_ops.data_complete)
+			adapter->if_ops.data_complete(adapter, skb);
+		else
+			dev_kfree_skb_any(skb);
+
+		return 0;
+	}
+
+	if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+		struct sk_buff_head list;
+		struct sk_buff *rx_skb;
+
+		__skb_queue_head_init(&list);
+		skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+		skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
+
+		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+					 priv->wdev->iftype, 0, false);
+
+		while (!skb_queue_empty(&list)) {
+			rx_skb = __skb_dequeue(&list);
+			ret = mwifiex_recv_packet(adapter, rx_skb);
+			if (ret)
+				dev_err(adapter->dev,
+					"AP:Rx A-MSDU failed");
+		}
+
+		return 0;
+	}
+
+	memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+
+	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+		node = mwifiex_get_sta_entry(priv, ta);
+		if (node)
+			node->rx_seq[uap_rx_pd->priority] =
+						le16_to_cpu(uap_rx_pd->seq_num);
+	}
+
+	if (!priv->ap_11n_enabled ||
+	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
+	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
+		ret = mwifiex_handle_uap_rx_forward(priv, skb);
+		return ret;
+	}
+
+	/* Reorder and send to kernel */
+	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
+	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
+					 uap_rx_pd->priority, ta, pkt_type,
+					 skb);
+
+	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+		if (adapter->if_ops.data_complete)
+			adapter->if_ops.data_complete(adapter, skb);
+		else
+			dev_kfree_skb_any(skb);
+	}
+
+	if (ret)
+		priv->stats.rx_dropped++;
+
+	return ret;
+}
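
The forwarding policy described in the comment on mwifiex_handle_uap_rx_forward() reduces to three branches: multicast goes both to the host and back out over the air, unicast to a known associated station is bridged only, and anything else is handed to the host stack. A compact sketch of just that decision, with a boolean standing in for the mwifiex_get_sta_entry() lookup:

#include <stdbool.h>
#include <stdio.h>

enum fwd_action {
	FWD_TO_HOST,            /* hand to the local network stack */
	FWD_BRIDGE,             /* requeue on the AP TX path only */
	FWD_BRIDGE_AND_HOST,    /* deliver locally and rebroadcast */
};

static bool is_multicast(const unsigned char *da)
{
	return da[0] & 0x01;    /* group bit of the destination address */
}

/* sta_known stands in for a successful mwifiex_get_sta_entry() lookup */
static enum fwd_action uap_forward(const unsigned char *da, bool sta_known)
{
	if (is_multicast(da))
		return FWD_BRIDGE_AND_HOST;
	if (sta_known)
		return FWD_BRIDGE;      /* intra-BSS unicast */
	return FWD_TO_HOST;             /* inter-BSS unicast: let the host route it */
}

int main(void)
{
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("%d %d %d\n",
	       uap_forward(bcast, false),
	       uap_forward(ucast, true),
	       uap_forward(ucast, false));
	return 0;
}
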
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 3fa4d41..8ccd699 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -127,6 +127,29 @@
 	return ra_list;
 }
 
+/* This function returns a random number between 16 and 32 to be used as the
+ * threshold for the number of packets after which BA setup is initiated.
+ */
+static u8 mwifiex_get_random_ba_threshold(void)
+{
+	u32 sec, usec;
+	struct timeval ba_tstamp;
+	u8 ba_threshold;
+
+	/* set up ba_packet_threshold here as a random number between
+	 * [BA_SETUP_PACKET_OFFSET,
+	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
+	 */
+
+	do_gettimeofday(&ba_tstamp);
+	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
+	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
+	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
+						      + BA_SETUP_PACKET_OFFSET;
+
+	return ba_threshold;
+}
+
 /*
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
@@ -137,6 +160,12 @@
 	int i;
 	struct mwifiex_ra_list_tbl *ra_list;
 	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_sta_node *node;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	node = mwifiex_get_sta_entry(priv, ra);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
 		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@
 		if (!ra_list)
 			break;
 
-		if (!mwifiex_queuing_ra_based(priv))
+		ra_list->is_11n_enabled = 0;
+		if (!mwifiex_queuing_ra_based(priv)) {
 			ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
-		else
-			ra_list->is_11n_enabled = false;
+		} else {
+			ra_list->is_11n_enabled =
+				      mwifiex_is_sta_11n_enabled(priv, node);
+			if (ra_list->is_11n_enabled)
+				ra_list->max_amsdu = node->max_amsdu;
+		}
 
 		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
 			ra_list, ra_list->is_11n_enabled);
 
+		if (ra_list->is_11n_enabled) {
+			ra_list->pkt_count = 0;
+			ra_list->ba_packet_thr =
+					      mwifiex_get_random_ba_threshold();
+		}
 		list_add_tail(&ra_list->list,
 			      &priv->wmm.tid_tbl_ptr[i].ra_list);
 
@@ -647,6 +686,7 @@
 	skb_queue_tail(&ra_list->skb_head, skb);
 
 	ra_list->total_pkts_size += skb->len;
+	ra_list->pkt_count++;
 
 	atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -986,10 +1026,17 @@
 {
 	int count = 0, total_size = 0;
 	struct sk_buff *skb, *tmp;
+	int max_amsdu_size;
+
+	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
+	    ptr->is_11n_enabled)
+		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
+	else
+		max_amsdu_size = max_buf_size;
 
 	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
 		total_size += skb->len;
-		if (total_size >= max_buf_size)
+		if (total_size >= max_amsdu_size)
 			break;
 		if (++count >= MIN_NUM_AMSDU)
 			return true;
@@ -1050,6 +1097,7 @@
 		skb_queue_tail(&ptr->skb_head, skb);
 
 		ptr->total_pkts_size += skb->len;
+		ptr->pkt_count++;
 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
@@ -1231,7 +1279,8 @@
 		/* ra_list_spinlock has been freed in
 		   mwifiex_send_single_packet() */
 	} else {
-		if (mwifiex_is_ampdu_allowed(priv, tid)) {
+		if (mwifiex_is_ampdu_allowed(priv, tid) &&
+		    ptr->pkt_count > ptr->ba_packet_thr) {
 			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
 				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
 						      BA_SETUP_INPROGRESS);
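
Two of the wmm.c changes work together: each 11n-capable RA list gets a pseudo-random ba_packet_thr derived from the current time, and a BA stream is only requested once pkt_count exceeds it, so stations do not all trigger ADDBA after the same fixed packet count. A standalone sketch of the threshold derivation, assuming BA_SETUP_PACKET_OFFSET and BA_SETUP_MAX_PACKET_THRESHOLD are both 16 (implied by the "between 16 and 32" comment):

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define BA_SETUP_MAX_PACKET_THRESHOLD  16   /* assumed value */
#define BA_SETUP_PACKET_OFFSET         16   /* assumed value */

/* Fold the seconds/microseconds of the current time into a threshold in
 * [OFFSET, OFFSET + MAX_THRESHOLD - 1], i.e. 16..31 with the values above. */
static uint8_t get_random_ba_threshold(void)
{
	struct timeval tv;
	uint32_t sec, usec;

	gettimeofday(&tv, NULL);
	sec  = (tv.tv_sec  & 0xFFFF) + (tv.tv_sec  >> 16);
	usec = (tv.tv_usec & 0xFFFF) + (tv.tv_usec >> 16);

	return ((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD
	       + BA_SETUP_PACKET_OFFSET;
}

int main(void)
{
	uint8_t thr = get_random_ba_threshold();
	unsigned int pkt_count = 20;

	/* mirrors the new check: only request a BA stream once enough
	 * packets have gone out to this RA */
	printf("threshold=%u, start BA setup: %s\n", thr,
	       pkt_count > thr ? "yes" : "not yet");
	return 0;
}
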
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224e03a..5099e53 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1830,12 +1830,14 @@
 }
 
 static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+	       int index,
+	       struct ieee80211_sta *sta,
+	       struct sk_buff *skb)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	struct ieee80211_tx_info *tx_info;
 	struct mwl8k_vif *mwl8k_vif;
-	struct ieee80211_sta *sta;
 	struct ieee80211_hdr *wh;
 	struct mwl8k_tx_queue *txq;
 	struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@
 	wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
 	tx_info = IEEE80211_SKB_CB(skb);
-	sta = tx_info->control.sta;
 	mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@
 	tx->pkt_phys_addr = cpu_to_le32(dma);
 	tx->pkt_len = cpu_to_le16(skb->len);
 	tx->rate_info = 0;
-	if (!priv->ap_fw && tx_info->control.sta != NULL)
-		tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+	if (!priv->ap_fw && sta != NULL)
+		tx->peer_id = MWL8K_STA(sta)->peer_id;
 	else
 		tx->peer_id = 0;
 
@@ -4364,7 +4365,9 @@
 /*
  * Core driver operations.
  */
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+		     struct ieee80211_tx_control *control,
+		     struct sk_buff *skb)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@
 		return;
 	}
 
-	mwl8k_txq_xmit(hw, index, skb);
+	mwl8k_txq_xmit(hw, index, control->sta, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 1403709..1ef1bfe 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -76,6 +76,7 @@
 	u16 freq;
 	u16 data;
 	int index;
+	int max_power;
 	enum ieee80211_band band;
 };
 
@@ -173,6 +174,7 @@
 	for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
 			   (i < list->entries); i++) {
 		struct p54_channel_entry *chan = &list->channels[i];
+		struct ieee80211_channel *dest = &tmp->channels[j];
 
 		if (chan->band != band)
 			continue;
@@ -190,14 +192,15 @@
 			continue;
 		}
 
-		tmp->channels[j].band = chan->band;
-		tmp->channels[j].center_freq = chan->freq;
+		dest->band = chan->band;
+		dest->center_freq = chan->freq;
+		dest->max_power = chan->max_power;
 		priv->survey[*chan_num].channel = &tmp->channels[j];
 		priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
 			SURVEY_INFO_CHANNEL_TIME |
 			SURVEY_INFO_CHANNEL_TIME_BUSY |
 			SURVEY_INFO_CHANNEL_TIME_TX;
-		tmp->channels[j].hw_value = (*chan_num);
+		dest->hw_value = (*chan_num);
 		j++;
 		(*chan_num)++;
 	}
@@ -229,10 +232,11 @@
 	return ret;
 }
 
-static void p54_update_channel_param(struct p54_channel_list *list,
-				     u16 freq, u16 data)
+static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
+							  u16 freq, u16 data)
 {
-	int band, i;
+	int i;
+	struct p54_channel_entry *entry = NULL;
 
 	/*
 	 * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@
 	 */
 	for (i = list->entries; i >= 0; i--) {
 		if (freq == list->channels[i].freq) {
-			list->channels[i].data |= data;
+			entry = &list->channels[i];
 			break;
 		}
 	}
 
 	if ((i < 0) && (list->entries < list->max_entries)) {
 		/* entry does not exist yet. Initialize a new one. */
-		band = p54_get_band_from_freq(freq);
+		int band = p54_get_band_from_freq(freq);
 
 		/*
 		 * filter out frequencies which don't belong into
 		 * any supported band.
 		 */
-		if (band < 0)
-			return ;
+		if (band >= 0) {
+			i = list->entries++;
+			list->band_channel_num[band]++;
 
-		i = list->entries++;
-		list->band_channel_num[band]++;
+			entry = &list->channels[i];
+			entry->freq = freq;
+			entry->band = band;
+			entry->index = ieee80211_frequency_to_channel(freq);
+			entry->max_power = 0;
+			entry->data = 0;
+		}
+	}
 
-		list->channels[i].freq = freq;
-		list->channels[i].data = data;
-		list->channels[i].band = band;
-		list->channels[i].index = ieee80211_frequency_to_channel(freq);
-		/* TODO: parse output_limit and fill max_power */
+	if (entry)
+		entry->data |= data;
+
+	return entry;
+}
+
+static int p54_get_maxpower(struct p54_common *priv, void *data)
+{
+	switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
+	case PDR_SYNTH_FRONTEND_LONGBOW: {
+		struct pda_channel_output_limit_longbow *pda = data;
+		int j;
+		u16 rawpower = 0;
+		pda = data;
+		for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
+			struct pda_channel_output_limit_point_longbow *point =
+				&pda->point[j];
+			rawpower = max_t(u16,
+				rawpower, le16_to_cpu(point->val_qpsk));
+			rawpower = max_t(u16,
+				rawpower, le16_to_cpu(point->val_bpsk));
+			rawpower = max_t(u16,
+				rawpower, le16_to_cpu(point->val_16qam));
+			rawpower = max_t(u16,
+				rawpower, le16_to_cpu(point->val_64qam));
+		}
+		/* longbow seems to use 1/16 dBm units */
+		return rawpower / 16;
+		}
+
+	case PDR_SYNTH_FRONTEND_DUETTE3:
+	case PDR_SYNTH_FRONTEND_DUETTE2:
+	case PDR_SYNTH_FRONTEND_FRISBEE:
+	case PDR_SYNTH_FRONTEND_XBOW: {
+		struct pda_channel_output_limit *pda = data;
+		u8 rawpower = 0;
+		rawpower = max(rawpower, pda->val_qpsk);
+		rawpower = max(rawpower, pda->val_bpsk);
+		rawpower = max(rawpower, pda->val_16qam);
+		rawpower = max(rawpower, pda->val_64qam);
+		/* raw values are in 1/4 dBm units */
+		return rawpower / 4;
+		}
+
+	default:
+		return 20;
 	}
 }
 
@@ -315,12 +367,19 @@
 		}
 
 		if (i < priv->output_limit->entries) {
-			freq = le16_to_cpup((__le16 *) (i *
-					    priv->output_limit->entry_size +
-					    priv->output_limit->offset +
-					    priv->output_limit->data));
+			struct p54_channel_entry *tmp;
 
-			p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
+			void *data = (void *) ((unsigned long) i *
+				priv->output_limit->entry_size +
+				priv->output_limit->offset +
+				priv->output_limit->data);
+
+			freq = le16_to_cpup((__le16 *) data);
+			tmp = p54_update_channel_param(list, freq,
+						       CHAN_HAS_LIMIT);
+			if (tmp) {
+				tmp->max_power = p54_get_maxpower(priv, data);
+			}
 		}
 
 		if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@
 		goto err;
 	}
 
+	priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
+
 	err = p54_generate_channel_lists(dev);
 	if (err)
 		goto err;
 
-	priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
 	if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
 		p54_init_xbow_synth(priv);
 	if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
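
p54_get_maxpower() folds the per-channel output-limit samples into one max_power value, and the unit of the raw samples depends on the RF frontend: Longbow entries are in 1/16 dBm steps, the Duette/Frisbee/Xbow entries in 1/4 dBm steps. A tiny worked example of that conversion with made-up raw values:

#include <stdio.h>

/* Take the largest sample across the BPSK/QPSK/16-QAM/64-QAM limits. */
static int max_of4(int a, int b, int c, int d)
{
	int m = a > b ? a : b;
	if (c > m) m = c;
	if (d > m) m = d;
	return m;
}

int main(void)
{
	/* Longbow: 16-bit samples in 1/16 dBm units (illustrative values) */
	int longbow_raw = max_of4(288, 304, 272, 256);  /* 304 -> 19 dBm */
	/* Duette/Frisbee/Xbow: 8-bit samples in 1/4 dBm units */
	int xbow_raw    = max_of4(72, 76, 68, 64);      /* 76 -> 19 dBm */

	printf("longbow max_power = %d dBm\n", longbow_raw / 16);
	printf("xbow    max_power = %d dBm\n", xbow_raw / 4);
	return 0;
}
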
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index afde72b..20ebe39 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -57,6 +57,18 @@
 	u8 rate_set_size;
 } __packed;
 
+struct pda_channel_output_limit_point_longbow {
+	__le16 val_bpsk;
+	__le16 val_qpsk;
+	__le16 val_16qam;
+	__le16 val_64qam;
+} __packed;
+
+struct pda_channel_output_limit_longbow {
+	__le16 freq;
+	struct pda_channel_output_limit_point_longbow point[3];
+} __packed;
+
 struct pda_pa_curve_data_sample_rev0 {
 	u8 rf_power;
 	u8 pa_detector;
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 3d8d622..de1d46b 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,9 @@
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7cffea7..5e91ad0 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -158,7 +158,7 @@
 	 * to cancel the old beacon template by hand, instead the firmware
 	 * will release the previous one through the feedback mechanism.
 	 */
-	p54_tx_80211(priv->hw, beacon);
+	p54_tx_80211(priv->hw, NULL, beacon);
 	priv->tsf_high32 = 0;
 	priv->tsf_low32 = 0;
 
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 89318ad..b439079 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -488,6 +488,58 @@
 	return 0;
 }
 
+static void p54p_firmware_step2(const struct firmware *fw,
+				void *context)
+{
+	struct p54p_priv *priv = context;
+	struct ieee80211_hw *dev = priv->common.hw;
+	struct pci_dev *pdev = priv->pdev;
+	int err;
+
+	if (!fw) {
+		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
+		err = -ENOENT;
+		goto out;
+	}
+
+	priv->firmware = fw;
+
+	err = p54p_open(dev);
+	if (err)
+		goto out;
+	err = p54_read_eeprom(dev);
+	p54p_stop(dev);
+	if (err)
+		goto out;
+
+	err = p54_register_common(dev, &pdev->dev);
+	if (err)
+		goto out;
+
+out:
+
+	complete(&priv->fw_loaded);
+
+	if (err) {
+		struct device *parent = pdev->dev.parent;
+
+		if (parent)
+			device_lock(parent);
+
+		/*
+		 * This will indirectly result in a call to p54p_remove.
+		 * Hence, we don't need to bother with freeing any
+		 * allocated resources at all.
+		 */
+		device_release_driver(&pdev->dev);
+
+		if (parent)
+			device_unlock(parent);
+	}
+
+	pci_dev_put(pdev);
+}
+
 static int __devinit p54p_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id)
 {
@@ -496,6 +548,7 @@
 	unsigned long mem_addr, mem_len;
 	int err;
 
+	pci_dev_get(pdev);
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@
 	priv = dev->priv;
 	priv->pdev = pdev;
 
+	init_completion(&priv->fw_loaded);
 	SET_IEEE80211_DEV(dev, &pdev->dev);
 	pci_set_drvdata(pdev, dev);
 
@@ -561,32 +615,12 @@
 	spin_lock_init(&priv->lock);
 	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
 
-	err = request_firmware(&priv->firmware, "isl3886pci",
-			       &priv->pdev->dev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
-		err = request_firmware(&priv->firmware, "isl3886",
-				       &priv->pdev->dev);
-		if (err)
-			goto err_free_common;
-	}
+	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
+				      &priv->pdev->dev, GFP_KERNEL,
+				      priv, p54p_firmware_step2);
+	if (!err)
+		return 0;
 
-	err = p54p_open(dev);
-	if (err)
-		goto err_free_common;
-	err = p54_read_eeprom(dev);
-	p54p_stop(dev);
-	if (err)
-		goto err_free_common;
-
-	err = p54_register_common(dev, &pdev->dev);
-	if (err)
-		goto err_free_common;
-
-	return 0;
-
- err_free_common:
-	release_firmware(priv->firmware);
 	pci_free_consistent(pdev, sizeof(*priv->ring_control),
 			    priv->ring_control, priv->ring_control_dma);
 
@@ -601,6 +635,7 @@
 	pci_release_regions(pdev);
  err_disable_dev:
 	pci_disable_device(pdev);
+	pci_dev_put(pdev);
 	return err;
 }
 
@@ -612,8 +647,9 @@
 	if (!dev)
 		return;
 
-	p54_unregister_common(dev);
 	priv = dev->priv;
+	wait_for_completion(&priv->fw_loaded);
+	p54_unregister_common(dev);
 	release_firmware(priv->firmware);
 	pci_free_consistent(pdev, sizeof(*priv->ring_control),
 			    priv->ring_control, priv->ring_control_dma);
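
The p54pci change switches to request_firmware_nowait() and introduces a fw_loaded completion so that p54p_remove() blocks until the deferred second probe step has finished, instead of racing it. A rough userspace analogue of that synchronisation using pthreads, with stub print statements standing in for the firmware load and device registration:

#include <pthread.h>
#include <stdio.h>

/* Poor man's 'struct completion': a flag guarded by a mutex and a condvar. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion fw_loaded = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* Stand-in for p54p_firmware_step2(): finish the probe, then signal. */
static void *firmware_step2(void *arg)
{
	(void)arg;
	printf("firmware loaded, device registered\n");
	complete(&fw_loaded);
	return NULL;
}

int main(void)
{
	pthread_t loader;

	pthread_create(&loader, NULL, firmware_step2, NULL);   /* probe() */
	wait_for_completion(&fw_loaded);                        /* remove() */
	printf("safe to unregister and free\n");
	pthread_join(loader, NULL);
	return 0;
}
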
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 7aa509f..68405c1 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -105,6 +105,7 @@
 	struct sk_buff *tx_buf_data[32];
 	struct sk_buff *tx_buf_mgmt[4];
 	struct completion boot_comp;
+	struct completion fw_loaded;
 };
 
 #endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 7f207b6..effb044 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -42,7 +42,7 @@
  * whenever you add a new device.
  */
 
-static struct usb_device_id p54u_table[] __devinitdata = {
+static struct usb_device_id p54u_table[] = {
 	/* Version 1 devices (pci chip + net2280) */
 	{USB_DEVICE(0x0411, 0x0050)},	/* Buffalo WLI2-USB2-G54 */
 	{USB_DEVICE(0x045e, 0x00c2)},	/* Microsoft MN-710 */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f38786e..5861e13 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -676,8 +676,9 @@
 EXPORT_SYMBOL_GPL(p54_rx);
 
 static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
-				struct ieee80211_tx_info *info, u8 *queue,
-				u32 *extra_len, u16 *flags, u16 *aid,
+				struct ieee80211_tx_info *info,
+				struct ieee80211_sta *sta,
+				u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
 				bool *burst_possible)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@
 			}
 		}
 
-		if (info->control.sta)
-			*aid = info->control.sta->aid;
+		if (sta)
+			*aid = sta->aid;
 		break;
 	}
 }
@@ -767,7 +768,9 @@
 	}
 }
 
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb)
 {
 	struct p54_common *priv = dev->priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@
 	u8 nrates = 0, nremaining = 8;
 	bool burst_allowed = false;
 
-	p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
+	p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
 			    &hdr_flags, &aid, &burst_allowed);
 
 	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 241162e..7a4ae9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1803,6 +1803,7 @@
 						struct cfg80211_pmksa *pmksa,
 						int max_pmkids)
 {
+	struct ndis_80211_pmkid *new_pmkids;
 	int i, err, newlen;
 	unsigned int count;
 
@@ -1833,11 +1834,12 @@
 	/* add new pmkid */
 	newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
 
-	pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
-	if (!pmkids) {
+	new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
+	if (!new_pmkids) {
 		err = -ENOMEM;
 		goto error;
 	}
+	pmkids = new_pmkids;
 
 	pmkids->length = cpu_to_le32(newlen);
 	pmkids->bssid_info_count = cpu_to_le32(count + 1);
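
The rndis_wlan change is the standard fix for a realloc-style leak: when krealloc() fails it returns NULL but leaves the original buffer allocated, so writing the result straight back into the only pointer loses the old allocation. A userspace sketch of the safe pattern with plain realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int append_entry(char **buf, size_t *len, const char *entry)
{
	size_t add = strlen(entry);
	/* Keep the old pointer until the reallocation is known to succeed. */
	char *grown = realloc(*buf, *len + add + 1);

	if (!grown)
		return -1;      /* *buf is still valid and can be freed later */

	memcpy(grown + *len, entry, add + 1);
	*buf = grown;
	*len += add;
	return 0;
}

int main(void)
{
	char *buf = calloc(1, 1);
	size_t len = 0;

	if (append_entry(&buf, &len, "pmkid-1 ") == 0 &&
	    append_entry(&buf, &len, "pmkid-2") == 0)
		printf("%s\n", buf);
	free(buf);
	return 0;
}
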
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1..cb8c2ac 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@
 	mutex_unlock(&rt2x00dev->csr_mutex);
 }
 
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	int i, count;
+
+	rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+	if (rt2x00_get_field32(reg, WLAN_EN))
+		return 0;
+
+	rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+	rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+	rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+	rt2x00_set_field32(&reg, WLAN_EN, 1);
+	rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+	udelay(REGISTER_BUSY_DELAY);
+
+	count = 0;
+	do {
+		/*
+		 * Check PLL_LD & XTAL_RDY.
+		 */
+		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+			rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+			if (rt2x00_get_field32(reg, PLL_LD) &&
+			    rt2x00_get_field32(reg, XTAL_RDY))
+				break;
+			udelay(REGISTER_BUSY_DELAY);
+		}
+
+		if (i >= REGISTER_BUSY_COUNT) {
+
+			if (count >= 10)
+				return -EIO;
+
+			rt2800_register_write(rt2x00dev, 0x58, 0x018);
+			udelay(REGISTER_BUSY_DELAY);
+			rt2800_register_write(rt2x00dev, 0x58, 0x418);
+			udelay(REGISTER_BUSY_DELAY);
+			rt2800_register_write(rt2x00dev, 0x58, 0x618);
+			udelay(REGISTER_BUSY_DELAY);
+			count++;
+		} else {
+			count = 0;
+		}
+
+		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+		rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+		rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+		rt2x00_set_field32(&reg, WLAN_RESET, 1);
+		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+		udelay(10);
+		rt2x00_set_field32(&reg, WLAN_RESET, 0);
+		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+		udelay(10);
+		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+	} while (count != 0);
+
+	return 0;
+}
+
 void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
 			const u8 command, const u8 token,
 			const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@
 {
 	unsigned int i;
 	u32 reg;
+	int retval;
+
+	if (rt2x00_rt(rt2x00dev, RT3290)) {
+		retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+		if (retval)
+			return -EBUSY;
+	}
 
 	/*
 	 * If driver doesn't wake up firmware here,
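
rt2800_enable_wlan_rt3290(), now shared from rt2800lib so the WLAN clock is enabled before firmware wake-up on RT3290, follows a two-level retry: poll the PLL_LD/XTAL_RDY bits a bounded number of times, and if they never come up, kick the clock logic and retry the whole sequence up to ten times before giving up with -EIO. A schematic sketch of that control flow, with a hypothetical hw_ready() probe in place of the register reads:

#include <stdbool.h>
#include <stdio.h>

#define POLL_COUNT   100   /* stands in for REGISTER_BUSY_COUNT */
#define MAX_RETRIES  10

/* Hypothetical probe: pretend the hardware becomes ready after 3 kicks. */
static bool hw_ready(int kicks)
{
	return kicks >= 3;
}

static int enable_wlan(void)
{
	int kicks = 0;

	for (;;) {
		int i;

		for (i = 0; i < POLL_COUNT; i++) {
			if (hw_ready(kicks))
				return 0;       /* PLL locked, crystal ready */
			/* udelay(REGISTER_BUSY_DELAY) would go here */
		}

		if (kicks++ >= MAX_RETRIES)
			return -5;              /* -EIO: give up */
		/* re-kick the clock logic (the 0x58 register writes) and retry */
	}
}

int main(void)
{
	printf("enable_wlan() = %d\n", enable_wlan());
	return 0;
}
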
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e..98aa426 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@
 	return rt2800_validate_eeprom(rt2x00dev);
 }
 
-static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-	int i, count;
-
-	rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
-	if (rt2x00_get_field32(reg, WLAN_EN))
-		return 0;
-
-	rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
-	rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
-	rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
-	rt2x00_set_field32(&reg, WLAN_EN, 1);
-	rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-
-	udelay(REGISTER_BUSY_DELAY);
-
-	count = 0;
-	do {
-		/*
-		 * Check PLL_LD & XTAL_RDY.
-		 */
-		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-			rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
-			if (rt2x00_get_field32(reg, PLL_LD) &&
-			    rt2x00_get_field32(reg, XTAL_RDY))
-				break;
-			udelay(REGISTER_BUSY_DELAY);
-		}
-
-		if (i >= REGISTER_BUSY_COUNT) {
-
-			if (count >= 10)
-				return -EIO;
-
-			rt2800_register_write(rt2x00dev, 0x58, 0x018);
-			udelay(REGISTER_BUSY_DELAY);
-			rt2800_register_write(rt2x00dev, 0x58, 0x418);
-			udelay(REGISTER_BUSY_DELAY);
-			rt2800_register_write(rt2x00dev, 0x58, 0x618);
-			udelay(REGISTER_BUSY_DELAY);
-			count++;
-		} else {
-			count = 0;
-		}
-
-		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
-		rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
-		rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
-		rt2x00_set_field32(&reg, WLAN_RESET, 1);
-		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-		udelay(10);
-		rt2x00_set_field32(&reg, WLAN_RESET, 0);
-		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-		udelay(10);
-		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
-	} while (count != 0);
-
-	return 0;
-}
 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
@@ -1063,17 +1003,6 @@
 		return retval;
 
 	/*
-	 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
-	 * clk for rt3290. That avoid the MCU fail in start phase.
-	 */
-	if (rt2x00_rt(rt2x00dev, RT3290)) {
-		retval = rt2800_enable_wlan_rt3290(rt2x00dev);
-
-		if (retval)
-			return retval;
-	}
-
-	/*
 	 * This device has multiple filters for control frames
 	 * and has a separate filter for PS Poll frames.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8afb546..f991e8b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1287,7 +1287,9 @@
 /*
  * mac80211 handlers.
  */
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index a6b88bd..a59048f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -194,7 +194,7 @@
 	 */
 	skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
 	while (skb) {
-		rt2x00mac_tx(rt2x00dev->hw, skb);
+		rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
 		skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
 	}
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ff26c2..c3d0f2f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,9 @@
 	return retval;
 }
 
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f7e74a0..e488b94 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -315,6 +315,7 @@
 static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 						struct sk_buff *skb,
 						struct txentry_desc *txdesc,
+						struct ieee80211_sta *sta,
 						const struct rt2x00_rate *hwrate)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct rt2x00_sta *sta_priv = NULL;
 
-	if (tx_info->control.sta) {
+	if (sta) {
 		txdesc->u.ht.mpdu_density =
-		    tx_info->control.sta->ht_cap.ampdu_density;
+		    sta->ht_cap.ampdu_density;
 
-		sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+		sta_priv = sta_to_rt2x00_sta(sta);
 		txdesc->u.ht.wcid = sta_priv->wcid;
 	}
 
@@ -341,8 +342,8 @@
 		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
 		 * when using more than one tx stream (>MCS7).
 		 */
-		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
-		    ((tx_info->control.sta->ht_cap.cap &
+		if (sta && txdesc->u.ht.mcs > 7 &&
+		    ((sta->ht_cap.cap &
 		      IEEE80211_HT_CAP_SM_PS) >>
 		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
 		    WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@
 
 static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 					     struct sk_buff *skb,
-					     struct txentry_desc *txdesc)
+					     struct txentry_desc *txdesc,
+					     struct ieee80211_sta *sta)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@
 
 	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
 		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
-						    hwrate);
+						   sta, hwrate);
 	else
 		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
 						      hwrate);
@@ -595,7 +597,7 @@
 	 * after that we are free to use the skb->cb array
 	 * for our information.
 	 */
-	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
+	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
 
 	/*
 	 * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@
 	 * after that we are free to use the skb->cb array
 	 * for our information.
 	 */
-	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
+	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 
 	/*
 	 * Fill in skb descriptor
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f322596..3f7bc5c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2243,8 +2243,7 @@
 
 static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
 {
-	struct ieee80211_conf conf = { .flags = 0 };
-	struct rt2x00lib_conf libconf = { .conf = &conf };
+	struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
 
 	rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index aceaf68..021d83e 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -244,7 +244,9 @@
 	return IRQ_HANDLED;
 }
 
-static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@
 	/* TODO: use actual beacon queue */
 	skb_set_queue_mapping(skb, 0);
 
-	rtl8180_tx(dev, skb);
+	rtl8180_tx(dev, NULL, skb);
 
 resched:
 	/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 71a30b02..7811b63 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -44,7 +44,7 @@
 MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
 MODULE_LICENSE("GPL");
 
-static struct usb_device_id rtl8187_table[] __devinitdata = {
+static struct usb_device_id rtl8187_table[] = {
 	/* Asus */
 	{USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
 	/* Belkin */
@@ -228,7 +228,9 @@
 	}
 }
 
-static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
 {
 	struct rtl8187_priv *priv = dev->priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@
 	/* TODO: use actual beacon queue */
 	skb_set_queue_mapping(skb, 0);
 
-	rtl8187_tx(dev, skb);
+	rtl8187_tx(dev, NULL, skb);
 
 resched:
 	/*
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 942e56b..59381fe 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1341,9 +1341,8 @@
 		rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
 
 		info->control.rates[0].idx = 0;
-		info->control.sta = sta;
 		info->band = hw->conf.channel->band;
-		rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+		rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
 	}
 err_free:
 	return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a18ad2a..a7c0e52 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -124,7 +124,9 @@
 	mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@
 	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
 		goto err_free;
 
-	if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
-		rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+	if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+		rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
 
 	return;
 
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 80f75d3..aad9d44 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -504,7 +504,7 @@
 				_rtl_update_earlymode_info(hw, skb,
 							   &tcb_desc, tid);
 
-			rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
 		}
 	}
 }
@@ -929,7 +929,7 @@
 	info = IEEE80211_SKB_CB(pskb);
 	pdesc = &ring->desc[0];
 	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
-		info, pskb, BEACON_QUEUE, &tcb_desc);
+		info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
 
 	__skb_queue_tail(&ring->queue, pskb);
 
@@ -1305,11 +1305,10 @@
 }
 
 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+					struct ieee80211_sta *sta,
 					struct sk_buff *skb)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
 	struct rtl_sta_info *sta_entry = NULL;
 	u8 tid = rtl_get_tid(skb);
 
@@ -1337,13 +1336,14 @@
 	return true;
 }
 
-static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-		struct rtl_tcb_desc *ptcb_desc)
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_sta *sta,
+		      struct sk_buff *skb,
+		      struct rtl_tcb_desc *ptcb_desc)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_sta_info *sta_entry = NULL;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
 	struct rtl8192_tx_ring *ring;
 	struct rtl_tx_desc *pdesc;
 	u8 idx;
@@ -1418,7 +1418,7 @@
 		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 
 	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
-			info, skb, hw_queue, ptcb_desc);
+			info, sta, skb, hw_queue, ptcb_desc);
 
 	__skb_queue_tail(&ring->queue, skb);
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52166640..390d6d4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -596,7 +596,9 @@
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
 			  u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	bool defaultadapter = true;
-	struct ieee80211_sta *sta;
 	u8 *pdesc = pdesc_tx;
 	u16 seq_number;
 	__le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c4adb97..a7cdd51 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -713,6 +713,7 @@
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr,
 			  u8 *pdesc, struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
 			  struct sk_buff *skb, u8 hw_queue,
 			  struct rtl_tcb_desc *ptcb_desc);
 bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 2e6eb35..27863d7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -496,7 +496,9 @@
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
 			  u8 queue_index,
 			  struct rtl_tcb_desc *tcb_desc)
 {
@@ -504,7 +506,6 @@
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	bool defaultadapter = true;
-	struct ieee80211_sta *sta = info->control.sta = info->control.sta;
 	u8 *qc = ieee80211_get_qos_ctl(hdr);
 	u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 	u16 seq_number;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 332b06e..725c53a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,9 @@
 					   struct sk_buff_head *);
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
 			  u8 queue_index,
 			  struct rtl_tcb_desc *tcb_desc);
 void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f80690d..4686f34 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -551,7 +551,9 @@
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
 			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	struct ieee80211_sta *sta = info->control.sta;
 	u8 *pdesc = pdesc_tx;
 	u16 seq_number;
 	__le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 057a524..c1b5dfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -730,6 +730,7 @@
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
 			  struct ieee80211_hdr *hdr,
 			  u8 *pdesc, struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
 			  struct sk_buff *skb, u8 hw_queue,
 			  struct rtl_tcb_desc *ptcb_desc);
 bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36d1cb3a..28c53fb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -591,14 +591,15 @@
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
 		struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-		struct ieee80211_tx_info *info, struct sk_buff *skb,
+		struct ieee80211_tx_info *info,
+		struct ieee80211_sta *sta,
+		struct sk_buff *skb,
 		u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct ieee80211_sta *sta = info->control.sta;
 	u8 *pdesc = pdesc_tx;
 	u16 seq_number;
 	__le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 011e7b06..64dd66f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -31,6 +31,7 @@
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
 			  u8 *pdesc, struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
 			  struct sk_buff *skb, u8 hw_queue,
 			  struct rtl_tcb_desc *ptcb_desc);
 void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index aa970fc..9140469 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -848,8 +848,10 @@
 	_rtl_submit_tx_urb(hw, _urb);
 }
 
-static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
-			    u16 hw_queue)
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
+				   struct ieee80211_sta *sta,
+				   struct sk_buff *skb,
+				   u16 hw_queue)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@
 		seq_number += 1;
 		seq_number <<= 4;
 	}
-	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
 					hw_queue, &tcb_desc);
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		if (qc)
@@ -901,7 +903,9 @@
 		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 }
 
-static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+static int rtl_usb_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_sta *sta,
+		      struct sk_buff *skb,
 		      struct rtl_tcb_desc *dummy)
 {
 	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@
 	if (unlikely(is_hal_stop(rtlhal)))
 		goto err_free;
 	hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
-	_rtl_usb_tx_preprocess(hw, skb, hw_queue);
+	_rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
 	_rtl_usb_transmit(hw, skb, hw_queue);
 	return NETDEV_TX_OK;
 
@@ -923,6 +927,7 @@
 }
 
 static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+					struct ieee80211_sta *sta,
 					struct sk_buff *skb)
 {
 	return false;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaa21f..40153e7 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -122,7 +122,7 @@
 	EEPROM_BOOT_EFUSE,
 };
 
-enum rtl_status {
+enum rtl_status {
 	RTL_STATUS_INTERFACE_START = 0,
 };
 
@@ -1418,6 +1418,7 @@
 	void (*fill_tx_desc) (struct ieee80211_hw *hw,
 			      struct ieee80211_hdr *hdr, u8 *pdesc_tx,
 			      struct ieee80211_tx_info *info,
+			      struct ieee80211_sta *sta,
 			      struct sk_buff *skb, u8 hw_queue,
 			      struct rtl_tcb_desc *ptcb_desc);
 	void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1475,11 +1476,15 @@
 	int (*adapter_start) (struct ieee80211_hw *hw);
 	void (*adapter_stop) (struct ieee80211_hw *hw);
 
-	int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
-			struct rtl_tcb_desc *ptcb_desc);
+	int (*adapter_tx) (struct ieee80211_hw *hw,
+			   struct ieee80211_sta *sta,
+			   struct sk_buff *skb,
+			   struct rtl_tcb_desc *ptcb_desc);
 	void (*flush)(struct ieee80211_hw *hw, bool drop);
 	int (*reset_trx_ring) (struct ieee80211_hw *hw);
-	bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+	bool (*waitq_insert) (struct ieee80211_hw *hw,
+			      struct ieee80211_sta *sta,
+			      struct sk_buff *skb);
 
 	/*pci */
 	void (*disable_aspm) (struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3118c42..441cbcc 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -354,7 +354,9 @@
 	return ret;
 }
 
-static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw,
+			 struct ieee80211_tx_control *control,
+			 struct sk_buff *skb)
 {
 	struct wl1251 *wl = hw->priv;
 	unsigned long flags;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 7254860..ff830cf 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1181,7 +1181,9 @@
 	return ret;
 }
 
-static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw,
+			 struct ieee80211_tx_control *control,
+			 struct sk_buff *skb)
 {
 	struct wl1271 *wl = hw->priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1199,7 @@
 	mapping = skb_get_queue_mapping(skb);
 	q = wl1271_tx_get_queue(mapping);
 
-	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
 
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0081f7..1a2f31c 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -130,16 +130,13 @@
 }
 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			 struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
-	if (control->control.sta) {
+	if (sta) {
 		struct wl1271_station *wl_sta;
 
-		wl_sta = (struct wl1271_station *)
-				control->control.sta->drv_priv;
+		wl_sta = (struct wl1271_station *)sta->drv_priv;
 		return wl_sta->hlid;
 	} else {
 		struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@
 }
 
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		      struct sk_buff *skb)
+		      struct sk_buff *skb, struct ieee80211_sta *sta)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -164,7 +161,7 @@
 		return wl->system_hlid;
 
 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
 	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
 	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -344,13 +341,12 @@
 
 /* caller must hold wl->mutex */
 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-				   struct sk_buff *skb, u32 buf_offset)
+				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
 {
 	struct ieee80211_tx_info *info;
 	u32 extra = 0;
 	int ret = 0;
 	u32 total_len;
-	u8 hlid;
 	bool is_dummy;
 	bool is_gem = false;
 
@@ -359,9 +355,13 @@
 		return -EINVAL;
 	}
 
+	if (hlid == WL12XX_INVALID_LINK_ID) {
+		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+		return -EINVAL;
+	}
+
 	info = IEEE80211_SKB_CB(skb);
 
-	/* TODO: handle dummy packets on multi-vifs */
 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
 
 	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +386,6 @@
 
 		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
 	}
-	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
-	if (hlid == WL12XX_INVALID_LINK_ID) {
-		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
-		return -EINVAL;
-	}
 
 	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
 				 is_gem);
@@ -517,7 +512,8 @@
 }
 
 static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-					      struct wl12xx_vif *wlvif)
+					      struct wl12xx_vif *wlvif,
+					      u8 *hlid)
 {
 	struct sk_buff *skb = NULL;
 	int i, h, start_hlid;
@@ -544,10 +540,11 @@
 	if (!skb)
 		wlvif->last_tx_hlid = 0;
 
+	*hlid = wlvif->last_tx_hlid;
 	return skb;
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
 {
 	unsigned long flags;
 	struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +553,7 @@
 	/* continue from last wlvif (round robin) */
 	if (wlvif) {
 		wl12xx_for_each_wlvif_continue(wl, wlvif) {
-			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
 			if (skb) {
 				wl->last_wlvif = wlvif;
 				break;
@@ -565,13 +562,15 @@
 	}
 
 	/* dequeue from the system HLID before the restarting wlvif list */
-	if (!skb)
+	if (!skb) {
 		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+		*hlid = wl->system_hlid;
+	}
 
 	/* do a new pass over the wlvif list */
 	if (!skb) {
 		wl12xx_for_each_wlvif(wl, wlvif) {
-			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
 			if (skb) {
 				wl->last_wlvif = wlvif;
 				break;
@@ -591,6 +590,7 @@
 		int q;
 
 		skb = wl->dummy_packet;
+		*hlid = wl->system_hlid;
 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +602,7 @@
 }
 
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-				  struct sk_buff *skb)
+				  struct sk_buff *skb, u8 hlid)
 {
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +610,6 @@
 	if (wl12xx_is_dummy_packet(wl, skb)) {
 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
 	} else {
-		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
 		/* make sure we dequeue the same packet next time */
@@ -686,26 +685,30 @@
 	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
 	int ret = 0;
 	int bus_ret = 0;
+	u8 hlid;
 
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		return 0;
 
-	while ((skb = wl1271_skb_dequeue(wl))) {
+	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 		bool has_data = false;
 
 		wlvif = NULL;
 		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
 			wlvif = wl12xx_vif_to_data(info->control.vif);
+		else
+			hlid = wl->system_hlid;
 
 		has_data = wlvif && wl1271_tx_is_data_present(skb);
-		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+					      hlid);
 		if (ret == -EAGAIN) {
 			/*
 			 * Aggregation buffer is full.
 			 * Flush buffer and try again.
 			 */
-			wl1271_skb_queue_head(wl, wlvif, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 
 			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
 							    last_len);
@@ -722,7 +725,7 @@
 			 * Firmware buffer is full.
 			 * Queue back last skb, and stop aggregating.
 			 */
-			wl1271_skb_queue_head(wl, wlvif, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 			/* No work left, avoid scheduling redundant tx work */
 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
@@ -732,7 +735,7 @@
 				 * fw still expects dummy packet,
 				 * so re-enqueue it
 				 */
-				wl1271_skb_queue_head(wl, wlvif, skb);
+				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 			else
 				ieee80211_free_txskb(wl->hw, skb);
 			goto out_ack;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b0..349520d 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 				enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			 struct sk_buff *skb);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		      struct sk_buff *skb);
+		      struct sk_buff *skb, struct ieee80211_sta *sta);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e..4598801 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+		     struct ieee80211_tx_control *control,
+		     struct sk_buff *skb)
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@
 		skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
 		if (!skb)
 			break;
-		zd_op_tx(mac->hw, skb);
+		zd_op_tx(mac->hw, NULL, skb);
 	}
 
 	/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3089990..39afd37 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1731,7 +1731,7 @@
 		break;
 
 	case XenbusStateConnected:
-		netif_notify_peers(netdev);
+		netdev_notify_peers(netdev);
 		break;
 
 	case XenbusStateClosing:
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c181b94..d4a1c9a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -364,6 +364,33 @@
 EXPORT_SYMBOL(of_get_next_child);
 
 /**
+ *	of_get_next_available_child - Find the next available child node
+ *	@node:	parent node
+ *	@prev:	previous child of the parent node, or NULL to get first
+ *
+ *      This function is like of_get_next_child(), except that it
+ *      automatically skips any disabled nodes (i.e. status = "disabled").
+ */
+struct device_node *of_get_next_available_child(const struct device_node *node,
+	struct device_node *prev)
+{
+	struct device_node *next;
+
+	read_lock(&devtree_lock);
+	next = prev ? prev->sibling : node->child;
+	for (; next; next = next->sibling) {
+		if (!of_device_is_available(next))
+			continue;
+		if (of_node_get(next))
+			break;
+	}
+	of_node_put(prev);
+	read_unlock(&devtree_lock);
+	return next;
+}
+EXPORT_SYMBOL(of_get_next_available_child);
+
+/**
  *	of_find_node_by_path - Find a node matching a full OF path
  *	@path:	The full path to match
  *
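
For reference, a minimal usage sketch for the of_get_next_available_child() helper added above; scan_enabled_children() and handle_child() are hypothetical names used only for illustration. The helper drops the reference on the previous child and takes one on the node it returns, so a loop that runs to completion needs no explicit of_node_put(); only an early break out of the loop would.

	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* hypothetical per-node hook, not part of the change above */
	static void handle_child(struct device_node *np) { }

	static void scan_enabled_children(struct platform_device *pdev)
	{
		struct device_node *child = NULL;

		/* only children whose status is not "disabled" are returned */
		while ((child = of_get_next_available_child(pdev->dev.of_node, child)))
			handle_child(child);	/* reference on child is dropped by the next call */
	}
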
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index fbf7b26..c5792d6 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -266,8 +266,8 @@
 	}
 
 	if (!error)
-		dev_printk(KERN_INFO, &dev->dev,
-				"power state changed by ACPI to D%d\n", state);
+		dev_info(&dev->dev, "power state changed by ACPI to %s\n",
+			 pci_power_name(state));
 
 	return error;
 }
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 185be37..5270f1a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -959,6 +959,13 @@
 	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
 		pci_prepare_to_sleep(pci_dev);
 
+	/*
+	 * The reason for doing this here is the same as for the analogous code
+	 * in pci_pm_suspend_noirq().
+	 */
+	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+		pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
 	return 0;
 }
 
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb7f3be..dc5c126 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -657,11 +657,7 @@
 	if (p != NULL)
 		return ERR_PTR(-EBUSY);
 
-	p = create_pinctrl(dev);
-	if (IS_ERR(p))
-		return p;
-
-	return p;
+	return create_pinctrl(dev);
 }
 
 /**
@@ -738,11 +734,8 @@
 			dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",
 				name);
 			state = create_state(p, name);
-			if (IS_ERR(state))
-				return state;
-		} else {
-			return ERR_PTR(-ENODEV);
-		}
+		} else
+			state = ERR_PTR(-ENODEV);
 	}
 
 	return state;
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff..3674d87 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@
 {
 	return platform_driver_register(&imx23_pinctrl_driver);
 }
-arch_initcall(imx23_pinctrl_init);
+postcore_initcall(imx23_pinctrl_init);
 
 static void __exit imx23_pinctrl_exit(void)
 {
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026..0f5b212 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@
 {
 	return platform_driver_register(&imx28_pinctrl_driver);
 }
-arch_initcall(imx28_pinctrl_init);
+postcore_initcall(imx28_pinctrl_init);
 
 static void __exit imx28_pinctrl_exit(void)
 {
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index 689b3c8..9fd0216 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -974,7 +974,7 @@
 	IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */
 	IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */
 	IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */
-	IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
+	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
 	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */
 	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */
 	IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769..a39fb7a 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -505,6 +505,8 @@
 	DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1,
 	DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2,
 	DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 };
+static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,
+	DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3};
 static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,
 	DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6,
 	DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8,
@@ -662,6 +664,7 @@
 	DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
 	DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
 	DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
+	DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
 	DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
 	DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
 	DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
@@ -751,7 +754,7 @@
 DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
 DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",
 	"lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1");
-DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1");
+DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");
 DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
 DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");
 DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1");
@@ -766,7 +769,7 @@
 DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
 DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
 DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
+DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
 DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
 DB8500_FUNC_GROUPS(usb, "usb_a_1");
 DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49..3dde653 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1292,7 +1292,7 @@
 						NOMADIK_GPIO_TO_IRQ(pdata->first_gpio),
 						0, &nmk_gpio_irq_simple_ops, nmk_chip);
 	if (!nmk_chip->domain) {
-		pr_err("%s: Failed to create irqdomain\n", np->full_name);
+		dev_err(&dev->dev, "failed to create irqdomain\n");
 		ret = -ENOSYS;
 		goto out;
 	}
@@ -1731,7 +1731,6 @@
 	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
 		if (!nmk_gpio_chips[i]) {
 			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			devm_kfree(&pdev->dev, npct);
 			return -EPROBE_DEFER;
 		}
 		npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8..7fca6ce 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@
 	iounmap(spmx->gpio_virtbase);
 out_no_gpio_remap:
 	platform_set_drvdata(pdev, NULL);
-	devm_kfree(&pdev->dev, spmx);
 	return ret;
 }
 
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c1..309f5b9 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@
 	upmx->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENOENT;
-		goto out_no_resource;
-	}
+	if (!res)
+		return -ENOENT;
 	upmx->phybase = res->start;
 	upmx->physize = resource_size(res);
 
@@ -1165,8 +1163,6 @@
 	platform_set_drvdata(pdev, NULL);
 out_no_memregion:
 	release_mem_region(upmx->phybase, upmx->physize);
-out_no_resource:
-	devm_kfree(&pdev->dev, upmx);
 	return ret;
 }
 
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2a262f5..c86bae8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -289,6 +289,7 @@
 	tristate "Lenovo IdeaPad Laptop Extras"
 	depends on ACPI
 	depends on RFKILL && INPUT
+	depends on SERIO_I8042
 	select INPUT_SPARSEKMAP
 	help
 	  This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
@@ -758,8 +759,11 @@
 
 config APPLE_GMUX
 	tristate "Apple Gmux Driver"
+	depends on ACPI
 	depends on PNP
-	select BACKLIGHT_CLASS_DEVICE
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
+	depends on ACPI_VIDEO=n || ACPI_VIDEO
 	---help---
 	  This driver provides support for the gmux device found on many
 	  Apple laptops, which controls the display mux for the hybrid
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 905fa01..dfb1a92 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -2,6 +2,7 @@
  *  Gmux driver for Apple laptops
  *
  *  Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
+ *  Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
@@ -18,16 +19,30 @@
 #include <linux/pnp.h>
 #include <linux/apple_bl.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <asm/io.h>
 
 struct apple_gmux_data {
 	unsigned long iostart;
 	unsigned long iolen;
+	bool indexed;
+	struct mutex index_lock;
 
 	struct backlight_device *bdev;
+
+	/* switcheroo data */
+	acpi_handle dhandle;
+	int gpe;
+	enum vga_switcheroo_client_id resume_client_id;
+	enum vga_switcheroo_state power_state;
+	struct completion powerchange_done;
 };
 
+static struct apple_gmux_data *apple_gmux_data;
+
 /*
  * gmux port offsets. Many of these are not yet used, but may be in the
  * future, and it's useful to have them documented here anyhow.
@@ -45,6 +60,9 @@
 #define GMUX_PORT_DISCRETE_POWER	0x50
 #define GMUX_PORT_MAX_BRIGHTNESS	0x70
 #define GMUX_PORT_BRIGHTNESS		0x74
+#define GMUX_PORT_VALUE			0xc2
+#define GMUX_PORT_READ			0xd0
+#define GMUX_PORT_WRITE			0xd4
 
 #define GMUX_MIN_IO_LEN			(GMUX_PORT_BRIGHTNESS + 4)
 
@@ -59,22 +77,172 @@
 #define GMUX_BRIGHTNESS_MASK		0x00ffffff
 #define GMUX_MAX_BRIGHTNESS		GMUX_BRIGHTNESS_MASK
 
-static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
+static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
 {
 	return inb(gmux_data->iostart + port);
 }
 
-static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port,
+static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port,
 			       u8 val)
 {
 	outb(val, gmux_data->iostart + port);
 }
 
-static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
+static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port)
 {
 	return inl(gmux_data->iostart + port);
 }
 
+static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port,
+			     u32 val)
+{
+	int i;
+	u8 tmpval;
+
+	for (i = 0; i < 4; i++) {
+		tmpval = (val >> (i * 8)) & 0xff;
+		outb(tmpval, gmux_data->iostart + port + i);
+	}
+}
+
+static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data)
+{
+	int i = 200;
+	u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+	while (i && (gwr & 0x01)) {
+		inb(gmux_data->iostart + GMUX_PORT_READ);
+		gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+		udelay(100);
+		i--;
+	}
+
+	return !!i;
+}
+
+static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data)
+{
+	int i = 200;
+	u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+	while (i && !(gwr & 0x01)) {
+		gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+		udelay(100);
+		i--;
+	}
+
+	if (gwr & 0x01)
+		inb(gmux_data->iostart + GMUX_PORT_READ);
+
+	return !!i;
+}
+
+static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port)
+{
+	u8 val;
+
+	mutex_lock(&gmux_data->index_lock);
+	outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+	gmux_index_wait_ready(gmux_data);
+	val = inb(gmux_data->iostart + GMUX_PORT_VALUE);
+	mutex_unlock(&gmux_data->index_lock);
+
+	return val;
+}
+
+static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port,
+			      u8 val)
+{
+	mutex_lock(&gmux_data->index_lock);
+	outb(val, gmux_data->iostart + GMUX_PORT_VALUE);
+	gmux_index_wait_ready(gmux_data);
+	outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+	gmux_index_wait_complete(gmux_data);
+	mutex_unlock(&gmux_data->index_lock);
+}
+
+static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port)
+{
+	u32 val;
+
+	mutex_lock(&gmux_data->index_lock);
+	outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+	gmux_index_wait_ready(gmux_data);
+	val = inl(gmux_data->iostart + GMUX_PORT_VALUE);
+	mutex_unlock(&gmux_data->index_lock);
+
+	return val;
+}
+
+static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port,
+			       u32 val)
+{
+	int i;
+	u8 tmpval;
+
+	mutex_lock(&gmux_data->index_lock);
+
+	for (i = 0; i < 4; i++) {
+		tmpval = (val >> (i * 8)) & 0xff;
+		outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i);
+	}
+
+	gmux_index_wait_ready(gmux_data);
+	outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+	gmux_index_wait_complete(gmux_data);
+	mutex_unlock(&gmux_data->index_lock);
+}
+
+static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
+{
+	if (gmux_data->indexed)
+		return gmux_index_read8(gmux_data, port);
+	else
+		return gmux_pio_read8(gmux_data, port);
+}
+
+static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val)
+{
+	if (gmux_data->indexed)
+		gmux_index_write8(gmux_data, port, val);
+	else
+		gmux_pio_write8(gmux_data, port, val);
+}
+
+static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
+{
+	if (gmux_data->indexed)
+		return gmux_index_read32(gmux_data, port);
+	else
+		return gmux_pio_read32(gmux_data, port);
+}
+
+static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
+			     u32 val)
+{
+	if (gmux_data->indexed)
+		gmux_index_write32(gmux_data, port, val);
+	else
+		gmux_pio_write32(gmux_data, port, val);
+}
+
+static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
+{
+	u16 val;
+
+	outb(0xaa, gmux_data->iostart + 0xcc);
+	outb(0x55, gmux_data->iostart + 0xcd);
+	outb(0x00, gmux_data->iostart + 0xce);
+
+	val = inb(gmux_data->iostart + 0xcc) |
+		(inb(gmux_data->iostart + 0xcd) << 8);
+
+	if (val == 0x55aa)
+		return true;
+
+	return false;
+}
+
 static int gmux_get_brightness(struct backlight_device *bd)
 {
 	struct apple_gmux_data *gmux_data = bl_get_data(bd);
@@ -90,16 +258,7 @@
 	if (bd->props.state & BL_CORE_SUSPENDED)
 		return 0;
 
-	/*
-	 * Older gmux versions require writing out lower bytes first then
-	 * setting the upper byte to 0 to flush the values. Newer versions
-	 * accept a single u32 write, but the old method also works, so we
-	 * just use the old method for all gmux versions.
-	 */
-	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
-	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8);
-	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16);
-	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0);
+	gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
 
 	return 0;
 }
@@ -110,6 +269,146 @@
 	.update_status = gmux_update_status,
 };
 
+static int gmux_switchto(enum vga_switcheroo_client_id id)
+{
+	if (id == VGA_SWITCHEROO_IGD) {
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
+	} else {
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+	}
+
+	return 0;
+}
+
+static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
+				   enum vga_switcheroo_state state)
+{
+	INIT_COMPLETION(gmux_data->powerchange_done);
+
+	if (state == VGA_SWITCHEROO_ON) {
+		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3);
+		pr_debug("Discrete card powered up\n");
+	} else {
+		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+		gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0);
+		pr_debug("Discrete card powered down\n");
+	}
+
+	gmux_data->power_state = state;
+
+	if (gmux_data->gpe >= 0 &&
+	    !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done,
+						       msecs_to_jiffies(200)))
+		pr_warn("Timeout waiting for gmux switch to complete\n");
+
+	return 0;
+}
+
+static int gmux_set_power_state(enum vga_switcheroo_client_id id,
+				enum vga_switcheroo_state state)
+{
+	if (id == VGA_SWITCHEROO_IGD)
+		return 0;
+
+	return gmux_set_discrete_state(apple_gmux_data, state);
+}
+
+static int gmux_get_client_id(struct pci_dev *pdev)
+{
+	/*
+	 * Early MacBook Pros with switchable graphics use Nvidia
+	 * integrated graphics. Hardcode that the 9400M is integrated.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+		return VGA_SWITCHEROO_IGD;
+	else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+		 pdev->device == 0x0863)
+		return VGA_SWITCHEROO_IGD;
+	else
+		return VGA_SWITCHEROO_DIS;
+}
+
+static enum vga_switcheroo_client_id
+gmux_active_client(struct apple_gmux_data *gmux_data)
+{
+	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
+		return VGA_SWITCHEROO_IGD;
+
+	return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler gmux_handler = {
+	.switchto = gmux_switchto,
+	.power_state = gmux_set_power_state,
+	.get_client_id = gmux_get_client_id,
+};
+
+static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
+{
+	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+		    GMUX_INTERRUPT_DISABLE);
+}
+
+static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data)
+{
+	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+		    GMUX_INTERRUPT_ENABLE);
+}
+
+static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data)
+{
+	return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS);
+}
+
+static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data)
+{
+	u8 status;
+
+	/* to clear interrupts write back current status */
+	status = gmux_interrupt_get_status(gmux_data);
+	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status);
+}
+
+static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
+{
+	u8 status;
+	struct pnp_dev *pnp = (struct pnp_dev *)context;
+	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+	status = gmux_interrupt_get_status(gmux_data);
+	gmux_disable_interrupts(gmux_data);
+	pr_debug("Notify handler called: status %d\n", status);
+
+	gmux_clear_interrupts(gmux_data);
+	gmux_enable_interrupts(gmux_data);
+
+	if (status & GMUX_INTERRUPT_STATUS_POWER)
+		complete(&gmux_data->powerchange_done);
+}
+
+static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+{
+	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+	gmux_data->resume_client_id = gmux_active_client(gmux_data);
+	gmux_disable_interrupts(gmux_data);
+	return 0;
+}
+
+static int gmux_resume(struct pnp_dev *pnp)
+{
+	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+	gmux_enable_interrupts(gmux_data);
+	gmux_switchto(gmux_data->resume_client_id);
+	if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
+		gmux_set_discrete_state(gmux_data, gmux_data->power_state);
+	return 0;
+}
+
 static int __devinit gmux_probe(struct pnp_dev *pnp,
 				const struct pnp_device_id *id)
 {
@@ -119,6 +418,11 @@
 	struct backlight_device *bdev;
 	u8 ver_major, ver_minor, ver_release;
 	int ret = -ENXIO;
+	acpi_status status;
+	unsigned long long gpe;
+
+	if (apple_gmux_data)
+		return -EBUSY;
 
 	gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
 	if (!gmux_data)
@@ -147,22 +451,29 @@
 	}
 
 	/*
-	 * On some machines the gmux is in ACPI even thought the machine
-	 * doesn't really have a gmux. Check for invalid version information
-	 * to detect this.
+	 * Invalid version information may indicate either that the gmux
+	 * device isn't present or that it's a new one that uses indexed
+	 * I/O.
 	 */
+
 	ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
 	ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
 	ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
 	if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
-		pr_info("gmux device not present\n");
-		ret = -ENODEV;
-		goto err_release;
+		if (gmux_is_indexed(gmux_data)) {
+			mutex_init(&gmux_data->index_lock);
+			gmux_data->indexed = true;
+		} else {
+			pr_info("gmux device not present\n");
+			ret = -ENODEV;
+			goto err_release;
+		}
+		pr_info("Found indexed gmux\n");
+	} else {
+		pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
+			ver_release);
 	}
 
-	pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
-		ver_release);
-
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_PLATFORM;
 	props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -194,13 +505,67 @@
 	 * Disable the other backlight choices.
 	 */
 	acpi_video_dmi_promote_vendor();
-#ifdef CONFIG_ACPI_VIDEO
+#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
 	acpi_video_unregister();
 #endif
 	apple_bl_unregister();
 
+	gmux_data->power_state = VGA_SWITCHEROO_ON;
+
+	gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev);
+	if (!gmux_data->dhandle) {
+		pr_err("Cannot find acpi handle for pnp device %s\n",
+		       dev_name(&pnp->dev));
+		ret = -ENODEV;
+		goto err_notify;
+	}
+
+	status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe);
+	if (ACPI_SUCCESS(status)) {
+		gmux_data->gpe = (int)gpe;
+
+		status = acpi_install_notify_handler(gmux_data->dhandle,
+						     ACPI_DEVICE_NOTIFY,
+						     &gmux_notify_handler, pnp);
+		if (ACPI_FAILURE(status)) {
+			pr_err("Install notify handler failed: %s\n",
+			       acpi_format_exception(status));
+			ret = -ENODEV;
+			goto err_notify;
+		}
+
+		status = acpi_enable_gpe(NULL, gmux_data->gpe);
+		if (ACPI_FAILURE(status)) {
+			pr_err("Cannot enable gpe: %s\n",
+			       acpi_format_exception(status));
+			goto err_enable_gpe;
+		}
+	} else {
+		pr_warn("No GPE found for gmux\n");
+		gmux_data->gpe = -1;
+	}
+
+	if (vga_switcheroo_register_handler(&gmux_handler)) {
+		ret = -ENODEV;
+		goto err_register_handler;
+	}
+
+	init_completion(&gmux_data->powerchange_done);
+	apple_gmux_data = gmux_data;
+	gmux_enable_interrupts(gmux_data);
+
 	return 0;
 
+err_register_handler:
+	if (gmux_data->gpe >= 0)
+		acpi_disable_gpe(NULL, gmux_data->gpe);
+err_enable_gpe:
+	if (gmux_data->gpe >= 0)
+		acpi_remove_notify_handler(gmux_data->dhandle,
+					   ACPI_DEVICE_NOTIFY,
+					   &gmux_notify_handler);
+err_notify:
+	backlight_device_unregister(bdev);
 err_release:
 	release_region(gmux_data->iostart, gmux_data->iolen);
 err_free:
@@ -212,12 +577,23 @@
 {
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
 
+	vga_switcheroo_unregister_handler();
+	gmux_disable_interrupts(gmux_data);
+	if (gmux_data->gpe >= 0) {
+		acpi_disable_gpe(NULL, gmux_data->gpe);
+		acpi_remove_notify_handler(gmux_data->dhandle,
+					   ACPI_DEVICE_NOTIFY,
+					   &gmux_notify_handler);
+	}
+
 	backlight_device_unregister(gmux_data->bdev);
+
 	release_region(gmux_data->iostart, gmux_data->iolen);
+	apple_gmux_data = NULL;
 	kfree(gmux_data);
 
 	acpi_video_dmi_demote_vendor();
-#ifdef CONFIG_ACPI_VIDEO
+#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
 	acpi_video_register();
 #endif
 	apple_bl_register();
@@ -233,6 +609,8 @@
 	.probe		= gmux_probe,
 	.remove		= __devexit_p(gmux_remove),
 	.id_table	= gmux_device_ids,
+	.suspend	= gmux_suspend,
+	.resume		= gmux_resume
 };
 
 static int __init apple_gmux_init(void)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c7a36f6..2eb9fe8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -101,6 +101,7 @@
 #define ASUS_WMI_DEVID_WIRELESS_LED	0x00010002
 #define ASUS_WMI_DEVID_CWAP		0x00010003
 #define ASUS_WMI_DEVID_WLAN		0x00010011
+#define ASUS_WMI_DEVID_WLAN_LED		0x00010012
 #define ASUS_WMI_DEVID_BLUETOOTH	0x00010013
 #define ASUS_WMI_DEVID_GPS		0x00010015
 #define ASUS_WMI_DEVID_WIMAX		0x00010017
@@ -731,8 +732,21 @@
 {
 	struct asus_rfkill *priv = data;
 	u32 ctrl_param = !blocked;
+	u32 dev_id = priv->dev_id;
 
-	return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL);
+	/*
+	 * If the user bit is set, the BIOS can't set and record the wlan
+	 * status; it reports the value read from ASUS_WMI_DEVID_WLAN_LED
+	 * when we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
+	 * So we have to record the wlan status in ASUS_WMI_DEVID_WLAN_LED
+	 * when setting the wlan status through WMI.
+	 * This also matches the behavior of the Windows application.
+	 */
+	if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
+	     priv->asus->driver->wlan_ctrl_by_user)
+		dev_id = ASUS_WMI_DEVID_WLAN_LED;
+
+	return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
 }
 
 static void asus_rfkill_query(struct rfkill *rfkill, void *data)
@@ -1653,6 +1667,7 @@
 	struct asus_wmi *asus;
 	acpi_status status;
 	int err;
+	u32 result;
 
 	asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
 	if (!asus)
@@ -1711,6 +1726,10 @@
 	if (err)
 		goto fail_debugfs;
 
+	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+	if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+		asus->driver->wlan_ctrl_by_user = 1;
+
 	return 0;
 
 fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 9c1da8b..4c9bd38 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -46,6 +46,7 @@
 struct asus_wmi_driver {
 	int			brightness;
 	int			panel_power;
+	int			wlan_ctrl_by_user;
 
 	const char		*name;
 	struct module		*owner;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2ca7dd1..c87ff16 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -350,6 +350,7 @@
 	inputdev->close = cmpc_accel_close_v4;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cmpc_accel_suspend_v4(struct device *dev)
 {
 	struct input_dev *inputdev;
@@ -384,6 +385,7 @@
 
 	return 0;
 }
+#endif
 
 static int cmpc_accel_add_v4(struct acpi_device *acpi)
 {
@@ -723,8 +725,10 @@
 	struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
 
 	if (event == 0x81) {
-		if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val)))
+		if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) {
 			input_report_switch(inputdev, SW_TABLET_MODE, !val);
+			input_sync(inputdev);
+		}
 	}
 }
 
@@ -737,8 +741,10 @@
 	set_bit(SW_TABLET_MODE, inputdev->swbit);
 
 	acpi = to_acpi_device(inputdev->dev.parent);
-	if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+	if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) {
 		input_report_switch(inputdev, SW_TABLET_MODE, !val);
+		input_sync(inputdev);
+	}
 }
 
 static int cmpc_tablet_add(struct acpi_device *acpi)
@@ -752,15 +758,19 @@
 	return cmpc_remove_acpi_notify_device(acpi);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cmpc_tablet_resume(struct device *dev)
 {
 	struct input_dev *inputdev = dev_get_drvdata(dev);
 
 	unsigned long long val = 0;
-	if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val)))
+	if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) {
 		input_report_switch(inputdev, SW_TABLET_MODE, !val);
+		input_sync(inputdev);
+	}
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
 
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 4e96e8c..927c33a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -211,7 +211,7 @@
 		.ident = "Dell Inspiron 5420",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
@@ -220,7 +220,7 @@
 		.ident = "Dell Inspiron 5520",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
@@ -229,7 +229,7 @@
 		.ident = "Dell Inspiron 5720",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
@@ -238,7 +238,7 @@
 		.ident = "Dell Inspiron 7420",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7420"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
@@ -247,7 +247,7 @@
 		.ident = "Dell Inspiron 7520",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
@@ -256,7 +256,7 @@
 		.ident = "Dell Inspiron 7720",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index d2e4173..7acae3f 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,11 +440,13 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fujitsu_resume(struct device *dev)
 {
 	fujitsu_reset();
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
 
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index d9ab6f6..777c7e3 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,10 +305,12 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int hdaps_resume(struct device *dev)
 {
 	return hdaps_device_init();
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
 
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index f4d9115..6b9af98 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -352,7 +352,7 @@
 }
 
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_suspend(struct device *dev)
 {
 	/* make sure the device is off when we suspend */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17f6dfd..dae7abe 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -36,6 +36,7 @@
 #include <linux/fb.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/i8042.h>
 
 #define IDEAPAD_RFKILL_DEV_NUM	(3)
 
@@ -63,8 +64,11 @@
 	VPCCMD_R_3G,
 	VPCCMD_W_3G,
 	VPCCMD_R_ODD, /* 0x21 */
-	VPCCMD_R_RF = 0x23,
+	VPCCMD_W_FAN,
+	VPCCMD_R_RF,
 	VPCCMD_W_RF,
+	VPCCMD_R_FAN = 0x2B,
+	VPCCMD_R_SPECIAL_BUTTONS = 0x31,
 	VPCCMD_W_BL_POWER = 0x33,
 };
 
@@ -356,14 +360,46 @@
 		return -EINVAL;
 	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
 	if (ret < 0)
-		return ret;
+		return -EIO;
 	return count;
 }
 
 static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
 
+static ssize_t show_ideapad_fan(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	unsigned long result;
+
+	if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+		return sprintf(buf, "-1\n");
+	return sprintf(buf, "%lu\n", result);
+}
+
+static ssize_t store_ideapad_fan(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int ret, state;
+
+	if (!count)
+		return 0;
+	if (sscanf(buf, "%i", &state) != 1)
+		return -EINVAL;
+	if (state < 0 || state > 4 || state == 3)
+		return -EINVAL;
+	ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+	if (ret < 0)
+		return -EIO;
+	return count;
+}
+
+static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan);
+
 static struct attribute *ideapad_attributes[] = {
 	&dev_attr_camera_power.attr,
+	&dev_attr_fan_mode.attr,
 	NULL
 };
 
@@ -377,7 +413,10 @@
 
 	if (attr == &dev_attr_camera_power.attr)
 		supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
-	else
+	else if (attr == &dev_attr_fan_mode.attr) {
+		unsigned long value;
+		supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+	} else
 		supported = true;
 
 	return supported ? attr->mode : 0;
@@ -518,9 +557,15 @@
  */
 static const struct key_entry ideapad_keymap[] = {
 	{ KE_KEY, 6,  { KEY_SWITCHVIDEOMODE } },
+	{ KE_KEY, 7,  { KEY_CAMERA } },
+	{ KE_KEY, 11, { KEY_F16 } },
 	{ KE_KEY, 13, { KEY_WLAN } },
 	{ KE_KEY, 16, { KEY_PROG1 } },
 	{ KE_KEY, 17, { KEY_PROG2 } },
+	{ KE_KEY, 64, { KEY_PROG3 } },
+	{ KE_KEY, 65, { KEY_PROG4 } },
+	{ KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
+	{ KE_KEY, 67, { KEY_TOUCHPAD_ON } },
 	{ KE_END, 0 },
 };
 
@@ -587,6 +632,28 @@
 		ideapad_input_report(priv, 16);
 }
 
+static void ideapad_check_special_buttons(struct ideapad_private *priv)
+{
+	unsigned long bit, value;
+
+	read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+
+	for (bit = 0; bit < 16; bit++) {
+		if (test_bit(bit, &value)) {
+			switch (bit) {
+			case 6:
+				/* Thermal Management button */
+				ideapad_input_report(priv, 65);
+				break;
+			case 1:
+				/* OneKey Theater button */
+				ideapad_input_report(priv, 64);
+				break;
+			}
+		}
+	}
+}
+
 /*
  * backlight
  */
@@ -691,6 +758,24 @@
 };
 MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
 
+static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+{
+	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+	unsigned long value;
+
+	/* Without reading from the EC, the touchpad LED doesn't switch state */
+	if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+		/* Some IdeaPads don't really turn off the touchpad - they only
+		 * switch the LED state. We (de)activate the KBC AUX port to
+		 * turn the touchpad off and on, and send KEY_TOUCHPAD_OFF and
+		 * KEY_TOUCHPAD_ON so as not to get out of sync with the LED */
+		unsigned char param;
+		i8042_command(&param, value ? I8042_CMD_AUX_ENABLE :
+			      I8042_CMD_AUX_DISABLE);
+		ideapad_input_report(priv, value ? 67 : 66);
+	}
+}
+
 static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
 {
 	int ret, i;
@@ -727,6 +812,7 @@
 			priv->rfk[i] = NULL;
 	}
 	ideapad_sync_rfk_state(priv);
+	ideapad_sync_touchpad_state(adevice);
 
 	if (!acpi_video_backlight_support()) {
 		ret = ideapad_backlight_init(priv);
@@ -785,9 +871,14 @@
 				ideapad_sync_rfk_state(priv);
 				break;
 			case 13:
+			case 11:
+			case 7:
 			case 6:
 				ideapad_input_report(priv, vpc_bit);
 				break;
+			case 5:
+				ideapad_sync_touchpad_state(adevice);
+				break;
 			case 4:
 				ideapad_backlight_notify_brightness(priv);
 				break;
@@ -797,6 +888,9 @@
 			case 2:
 				ideapad_backlight_notify_power(priv);
 				break;
+			case 0:
+				ideapad_check_special_buttons(priv);
+				break;
 			default:
 				pr_info("Unknown event: %lu\n", vpc_bit);
 			}
@@ -804,6 +898,15 @@
 	}
 }
 
+static int ideapad_acpi_resume(struct device *device)
+{
+	ideapad_sync_rfk_state(ideapad_priv);
+	ideapad_sync_touchpad_state(to_acpi_device(device));
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
+
 static struct acpi_driver ideapad_acpi_driver = {
 	.name = "ideapad_acpi",
 	.class = "IdeaPad",
@@ -811,6 +914,7 @@
 	.ops.add = ideapad_acpi_add,
 	.ops.remove = ideapad_acpi_remove,
 	.ops.notify = ideapad_acpi_notify,
+	.drv.pm = &ideapad_pm,
 	.owner = THIS_MODULE,
 };
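
For illustration, the fan_mode attribute added above can be exercised from userspace. The sysfs path below is an assumption (it depends on where the ideapad platform device registers), and the accepted values follow store_ideapad_fan(): 0, 1, 2 and 4, with 3 rejected.

/* Hypothetical userspace sketch -- path assumed, not taken from the patch. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/platform/ideapad/fan_mode";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* 0, 1, 2, 4 accepted; 3 returns -EINVAL */
	fclose(f);
	return 0;
}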
 
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f644418..2111dbb 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,9 @@
 #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS	0xe4
 #define MSI_STANDARD_EC_TOUCHPAD_MASK		(1 << 4)
 
+#ifdef CONFIG_PM_SLEEP
 static int msi_laptop_resume(struct device *device);
+#endif
 static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
 
 #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS	0x2f
@@ -753,6 +755,7 @@
 	return retval;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int msi_laptop_resume(struct device *device)
 {
 	u8 data;
@@ -773,6 +776,7 @@
 
 	return 0;
 }
+#endif
 
 static int __init msi_laptop_input_setup(void)
 {
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 2448007..8e8caa7 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -188,7 +188,9 @@
 };
 MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_pcc_hotkey_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
 
 static struct acpi_driver acpi_pcc_driver = {
@@ -540,6 +542,7 @@
 
 /* kernel module interface */
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_pcc_hotkey_resume(struct device *dev)
 {
 	struct pcc_acpi *pcc;
@@ -556,6 +559,7 @@
 
 	return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
 }
+#endif
 
 static int acpi_pcc_hotkey_add(struct acpi_device *device)
 {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9363969..daaddec 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,7 +140,10 @@
 		 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
 		 "(default: 0)");
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void);
+static void sony_nc_thermal_resume(void);
+#endif
 static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
 		unsigned int handle);
 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
@@ -151,7 +154,6 @@
 
 static int sony_nc_thermal_setup(struct platform_device *pd);
 static void sony_nc_thermal_cleanup(struct platform_device *pd);
-static void sony_nc_thermal_resume(void);
 
 static int sony_nc_lid_resume_setup(struct platform_device *pd);
 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
@@ -1431,6 +1433,7 @@
 	sony_nc_handles_cleanup(pd);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_function_resume(void)
 {
 	unsigned int i, result, bitmask, arg;
@@ -1508,6 +1511,7 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
 
@@ -1872,6 +1876,7 @@
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void)
 {
 	int ignore = 0;
@@ -1888,6 +1893,7 @@
 				(kbdbl_ctl->base + 0x200) |
 				(kbdbl_ctl->timeout << 0x10), &ignore);
 }
+#endif
 
 struct battery_care_control {
 	struct device_attribute attrs[2];
@@ -2210,6 +2216,7 @@
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_thermal_resume(void)
 {
 	unsigned int status = sony_nc_thermal_mode_get();
@@ -2217,6 +2224,7 @@
 	if (status != th_handle->mode)
 		sony_nc_thermal_mode_set(th_handle->mode);
 }
+#endif
 
 /* resume on LID open */
 struct snc_lid_resume_control {
@@ -4287,6 +4295,7 @@
 	return result;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int sony_pic_suspend(struct device *dev)
 {
 	if (sony_pic_disable(to_acpi_device(dev)))
@@ -4300,6 +4309,7 @@
 			spic_dev.cur_ioport, spic_dev.cur_irq);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
 
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e7f7328..80e3779 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -922,6 +922,7 @@
 static struct mutex tpacpi_inputdev_send_mutex;
 static LIST_HEAD(tpacpi_all_drivers);
 
+#ifdef CONFIG_PM_SLEEP
 static int tpacpi_suspend_handler(struct device *dev)
 {
 	struct ibm_struct *ibm, *itmp;
@@ -949,6 +950,7 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(tpacpi_pm,
 			 tpacpi_suspend_handler, tpacpi_resume_handler);
@@ -8662,6 +8664,13 @@
 		tp->model_str = kstrdup(s, GFP_KERNEL);
 		if (!tp->model_str)
 			return -ENOMEM;
+	} else {
+		s = dmi_get_system_info(DMI_BIOS_VENDOR);
+		if (s && !(strnicmp(s, "Lenovo", 6))) {
+			tp->model_str = kstrdup(s, GFP_KERNEL);
+			if (!tp->model_str)
+				return -ENOMEM;
+		}
 	}
 
 	s = dmi_get_system_info(DMI_PRODUCT_NAME);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c13ba5b..5f1256d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,6 +1296,7 @@
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_acpi_suspend(struct device *device)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
@@ -1317,6 +1318,7 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
 			 toshiba_acpi_suspend, toshiba_acpi_resume);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 715a43c..5e5d631 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -41,7 +41,9 @@
 };
 MODULE_DEVICE_TABLE(acpi, bt_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_bt_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
 
 static struct acpi_driver toshiba_bt_rfkill_driver = {
@@ -90,10 +92,12 @@
 	toshiba_bluetooth_enable(device->handle);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_bt_resume(struct device *dev)
 {
 	return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
 }
+#endif
 
 static int toshiba_bt_rfkill_add(struct acpi_device *device)
 {
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 849c07c..38ba39d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,10 +77,12 @@
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ebook_switch_resume(struct device *dev)
 {
 	return ebook_send_state(to_acpi_device(dev));
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
 
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 722246c..5d44252 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -435,6 +435,9 @@
 				" info %4.4x\n", DBELL_SID(idb.bytes),
 				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
 		}
+
+		wr_ptr = ioread32(priv->regs +
+				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
 	}
 
 	iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -445,6 +448,10 @@
 	regval |= TSI721_SR_CHINT_IDBQRCV;
 	iowrite32(regval,
 		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	if (wr_ptr != rd_ptr)
+		schedule_work(&priv->idb_work);
 }
 
 /**
@@ -2212,7 +2219,7 @@
 				  const struct pci_device_id *id)
 {
 	struct tsi721_device *priv;
-	int i, cap;
+	int cap;
 	int err;
 	u32 regval;
 
@@ -2232,12 +2239,15 @@
 	priv->pdev = pdev;
 
 #ifdef DEBUG
+	{
+	int i;
 	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
 			i, (unsigned long long)pci_resource_start(pdev, i),
 			(unsigned long)pci_resource_len(pdev, i),
 			pci_resource_flags(pdev, i));
 	}
+	}
 #endif
 	/*
 	 * Verify BAR configuration
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 182b553..c151fd5 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -486,6 +486,7 @@
 		.id   = AB3100_BUCK,
 		.ops  = &regulator_ops_variable_sleepable,
 		.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
+		.volt_table = ldo_e_buck_typ_voltages,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 		.enable_time = 1000,
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index e9c2085..ce0fe72 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -64,14 +64,15 @@
 static int anatop_get_voltage_sel(struct regulator_dev *reg)
 {
 	struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-	u32 val;
+	u32 val, mask;
 
 	if (!anatop_reg->control_reg)
 		return -ENOTSUPP;
 
 	val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
-	val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+	mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
 		anatop_reg->vol_bit_shift;
+	val = (val & mask) >> anatop_reg->vol_bit_shift;
 
 	return val - anatop_reg->min_bit_val;
 }
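
The point of the anatop change is that the field mask has to be built at the field's bit position before masking: the old code masked the unshifted register value with a bit-0 mask and then shifted, which zeroed out any field that does not start at bit 0. A worked illustration with a hypothetical field geometry (5 bits wide at bit 9):

	u32 reg   = 11 << 9;				/* register with the field holding 11 */
	u32 old   = (reg & ((1 << 5) - 1)) >> 9;	/* 0x1600 & 0x1f = 0, then >> 9 -> 0 (wrong) */
	u32 mask  = ((1 << 5) - 1) << 9;		/* 0x3e00 */
	u32 fixed = (reg & mask) >> 9;			/* -> 11, as intended */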
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f092588..4838531 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3217,7 +3217,7 @@
 
 	dev_set_drvdata(&rdev->dev, rdev);
 
-	if (config->ena_gpio) {
+	if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
 		ret = gpio_request_one(config->ena_gpio,
 				       GPIOF_DIR_OUT | config->ena_gpio_flags,
 				       rdev_get_name(rdev));
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 34b67be..8b5944f 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -57,16 +57,17 @@
 	return -EINVAL;
 }
 
-static int gpio_regulator_set_value(struct regulator_dev *dev,
-					int min, int max, unsigned *selector)
+static int gpio_regulator_set_voltage(struct regulator_dev *dev,
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
 	int ptr, target = 0, state, best_val = INT_MAX;
 
 	for (ptr = 0; ptr < data->nr_states; ptr++)
 		if (data->states[ptr].value < best_val &&
-		    data->states[ptr].value >= min &&
-		    data->states[ptr].value <= max) {
+		    data->states[ptr].value >= min_uV &&
+		    data->states[ptr].value <= max_uV) {
 			target = data->states[ptr].gpios;
 			best_val = data->states[ptr].value;
 			if (selector)
@@ -85,13 +86,6 @@
 	return 0;
 }
 
-static int gpio_regulator_set_voltage(struct regulator_dev *dev,
-					int min_uV, int max_uV,
-					unsigned *selector)
-{
-	return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
-}
-
 static int gpio_regulator_list_voltage(struct regulator_dev *dev,
 				      unsigned selector)
 {
@@ -106,7 +100,27 @@
 static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
 					int min_uA, int max_uA)
 {
-	return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
+	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+	int ptr, target = 0, state, best_val = 0;
+
+	for (ptr = 0; ptr < data->nr_states; ptr++)
+		if (data->states[ptr].value > best_val &&
+		    data->states[ptr].value >= min_uA &&
+		    data->states[ptr].value <= max_uA) {
+			target = data->states[ptr].gpios;
+			best_val = data->states[ptr].value;
+		}
+
+	if (best_val == 0)
+		return -EINVAL;
+
+	for (ptr = 0; ptr < data->nr_gpios; ptr++) {
+		state = (target & (1 << ptr)) >> ptr;
+		gpio_set_value(data->gpios[ptr].gpio, state);
+	}
+	data->state = target;
+
+	return 0;
 }
 
 static struct regulator_ops gpio_regulator_voltage_ops = {
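
Note the asymmetry the open-coded current-limit path introduces above: the voltage helper still picks the lowest state inside the requested window (best_val seeded with INT_MAX, "<" comparison), while set_current_limit now picks the highest state inside the window (best_val seeded with 0, ">" comparison). A comment-only sketch with a hypothetical three-state table:

	/* states[].value = { 500000, 1000000, 1500000 } uA (hypothetical)
	 * request min_uA = 600000, max_uA = 1600000:
	 *   lowest-in-range  (voltage path)        -> 1000000
	 *   highest-in-range (current path above)  -> 1500000
	 */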
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 17d19fb..46c7e88 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -486,9 +486,12 @@
 {
 	int ret, voltage;
 
-	ret = ((min_uV - 900000) / 50000) + 1;
-	if (ret < 0)
-		return ret;
+	if (min_uV == 0)
+		return 0;
+
+	if (min_uV < 900000)
+		min_uV = 900000;
+	ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;
 
 	/* Map back into a voltage to verify we're still in bounds */
 	voltage = palmas_list_voltage_ldo(rdev, ret);
@@ -586,7 +589,7 @@
 
 	addr = palmas_regs_info[id].ctrl_addr;
 
-	ret = palmas_smps_read(palmas, addr, &reg);
+	ret = palmas_ldo_read(palmas, addr, &reg);
 	if (ret)
 		return ret;
 
@@ -596,7 +599,7 @@
 	if (reg_init->mode_sleep)
 		reg |= PALMAS_LDO1_CTRL_MODE_SLEEP;
 
-	ret = palmas_smps_write(palmas, addr, reg);
+	ret = palmas_ldo_write(palmas, addr, reg);
 	if (ret)
 		return ret;
 
@@ -630,7 +633,7 @@
 
 	ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
 	if (ret)
-		goto err_unregister_regulator;
+		return ret;
 
 	if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)
 		pmic->smps123 = 1;
@@ -676,7 +679,9 @@
 		case PALMAS_REG_SMPS10:
 			pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
 			pmic->desc[id].ops = &palmas_ops_smps10;
-			pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
+			pmic->desc[id].vsel_reg =
+					PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+							PALMAS_SMPS10_CTRL);
 			pmic->desc[id].vsel_mask = SMPS10_VSEL;
 			pmic->desc[id].enable_reg =
 					PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
@@ -778,8 +783,10 @@
 			reg_init = pdata->reg_init[id];
 			if (reg_init) {
 				ret = palmas_ldo_init(palmas, id, reg_init);
-				if (ret)
+				if (ret) {
+					regulator_unregister(pmic->rdev[id]);
 					goto err_unregister_regulator;
+				}
 			}
 		}
 	}
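
In the LDO selector hunk above, DIV_ROUND_UP() guarantees that the mapped voltage never falls below the requested minimum, which plain integer division could allow. Assuming the mapping implied by the original expression (selector n -> 900000 + (n - 1) * 50000 uV, selector 0 meaning off):

	int min_uV = 1210000;					/* hypothetical request */
	int sel    = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;	/* = 7 + 1 = 8 */
	int uV     = 900000 + (sel - 1) * 50000;		/* = 1250000 >= min_uV */
	/* plain division would give sel = 7 -> 1200000, below the request */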
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index e6da90a..19241fc 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -240,14 +240,16 @@
 	TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
 	TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
 	TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
-	TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
+	TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
 
 	TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
 					ENB, 3, VCC2, 6),
 	TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
 					END, 3, VCC1, 6),
-	TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
-	TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+	TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
+					ENB, 1, VCC1, 2),
+	TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
+					ENB, 0, VCC1, 0),
 };
 
 /*
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 242fe90..77a71a5 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1037,7 +1037,7 @@
 TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);
 TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);
 TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300);
-TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08);
+TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);
 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08);
@@ -1048,7 +1048,6 @@
 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
 TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
 TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
-TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0);
 TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34);
 TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10);
 TWL6025_ADJUSTABLE_SMPS(VIO, 0x16);
@@ -1117,7 +1116,7 @@
 	TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6),
 	TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN),
 	TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB),
-	TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2),
+	TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1),
 	TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG),
 	TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5),
 	TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8),
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index eb415bd..9592b93 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -582,6 +582,7 @@
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
+	pm_stay_awake(rtc->dev.parent);
 	schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -844,6 +845,7 @@
 
 	mutex_lock(&rtc->ops_lock);
 again:
+	pm_relax(rtc->dev.parent);
 	__rtc_read_time(rtc, &tm);
 	now = rtc_tm_to_ktime(tm);
 	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 132333d..4267789 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,7 +568,6 @@
 		hpet_mask_rtc_irq_bit(RTC_AIE);
 
 		CMOS_READ(RTC_INTR_FLAGS);
-		pm_wakeup_event(cmos_rtc.dev, 0);
 	}
 	spin_unlock(&rtc_lock);
 
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 8361187..13e4df6 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -43,6 +43,7 @@
 #include <linux/rtc.h>
 #include <linux/spi/spi.h>
 #include <linux/module.h>
+#include <linux/sysfs.h>
 
 #define DRV_VERSION "0.6"
 
@@ -292,6 +293,7 @@
 	pdata->rtc = rtc;
 
 	for (i = 0; i < 16; i++) {
+		sysfs_attr_init(&pdata->regs[i].attr.attr);
 		sprintf(pdata->regs[i].name, "%1x", i);
 		pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
 		pdata->regs[i].attr.attr.name = pdata->regs[i].name;
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 77074cc..fd5c7af 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -122,9 +122,12 @@
 	tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
 	tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
 	if (!pdata->rtc_24h) {
-		tm->tm_hour %= 12;
-		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+		if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
+			tm->tm_hour -= 20;
+			tm->tm_hour %= 12;
 			tm->tm_hour += 12;
+		} else
+			tm->tm_hour %= 12;
 	}
 	tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
 	tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
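
The 12-hour decode above appears to rely on the PM flag sitting inside RS5C348_HOURS_MASK as the BCD "20 hours" bit (an assumption inferred from the "-= 20"); a worked trace under that assumption:

	/* 11 PM: PM bit + BCD 11 -> bcd2bin() = 31; 31 - 20 = 11; % 12 = 11; + 12 = 23
	 * 12 PM: PM bit + BCD 12 -> bcd2bin() = 32; 32 - 20 = 12; % 12 =  0; + 12 = 12
	 * The old "%= 12, then += 12" saw 31 for 11 PM and produced 19, because
	 * the PM bit leaked into the BCD value. */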
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a6f76b..b103293 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -242,11 +242,13 @@
 	switch (sdias_evbuf.event_status) {
 		case EVSTATE_ALL_STORED:
 			TRACE("all stored\n");
+			break;
 		case EVSTATE_PART_STORED:
 			TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
 			break;
 		case EVSTATE_NO_DATA:
 			TRACE("no data\n");
+			/* fall through */
 		default:
 			pr_err("Error from SCLP while copying hsa. "
 			       "Event status = %x\n",
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 2374468..32c26d7 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -324,8 +324,16 @@
 
 		res = irq_create_identity_mapping(d->domain, irq);
 		if (unlikely(res)) {
-			pr_err("can't get irq_desc for %d\n", irq);
-			continue;
+			if (res == -EEXIST) {
+				res = irq_domain_associate(d->domain, irq, irq);
+				if (unlikely(res)) {
+					pr_err("domain association failure\n");
+					continue;
+				}
+			} else {
+				pr_err("can't identity map IRQ %d\n", irq);
+				continue;
+			}
 		}
 
 		intc_irq_xlate_set(irq, vect->enum_id, d);
@@ -345,8 +353,19 @@
 			 */
 			res = irq_create_identity_mapping(d->domain, irq2);
 			if (unlikely(res)) {
-				pr_err("can't get irq_desc for %d\n", irq2);
-				continue;
+				if (res == -EEXIST) {
+					res = irq_domain_associate(d->domain,
+								   irq, irq);
+					if (unlikely(res)) {
+						pr_err("domain association "
+						       "failure\n");
+						continue;
+					}
+				} else {
+					pr_err("can't identity map IRQ %d\n",
+					       irq);
+					continue;
+				}
 			}
 
 			vect2->enum_id = 0;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 6e25ef1..ea0aaa3 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -438,7 +438,7 @@
 
 static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
 {
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
 
 	spi_unregister_master(master);
@@ -452,6 +452,8 @@
 
 	platform_set_drvdata(pdev, 0);
 
+	spi_master_put(master);
+
 	return 0;
 }
 
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index b2d4b9e..764bfee 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -533,7 +533,6 @@
 	iounmap(mcfqspi->iobase);
 	release_mem_region(res->start, resource_size(res));
 	spi_unregister_master(master);
-	spi_master_put(master);
 
 	return 0;
 }
@@ -541,7 +540,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int mcfqspi_suspend(struct device *dev)
 {
-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+	struct spi_master *master = dev_get_drvdata(dev);
 	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
 
 	spi_master_suspend(master);
@@ -553,7 +552,7 @@
 
 static int mcfqspi_resume(struct device *dev)
 {
-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+	struct spi_master *master = dev_get_drvdata(dev);
 	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
 
 	spi_master_resume(master);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index bc47781..b2fb141 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1228,18 +1228,16 @@
 
 	status = spi_register_master(master);
 	if (status < 0)
-		goto err_spi_register;
+		goto disable_pm;
 
 	return status;
 
-err_spi_register:
-	spi_master_put(master);
 disable_pm:
 	pm_runtime_disable(&pdev->dev);
 dma_chnl_free:
 	kfree(mcspi->dma_channels);
 free_master:
-	kfree(master);
+	spi_master_put(master);
 	platform_set_drvdata(pdev, NULL);
 	return status;
 }
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index aab518e..6abbe23 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2053,7 +2053,6 @@
 	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
 	       adev->res.start, pl022->virtbase);
 
-	pm_runtime_enable(dev);
 	pm_runtime_resume(dev);
 
 	pl022->clk = clk_get(&adev->dev, NULL);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 646a7657..d1c8441f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -826,7 +826,7 @@
 				struct spi_device *spi)
 {
 	struct s3c64xx_spi_csinfo *cs;
-	struct device_node *slave_np, *data_np;
+	struct device_node *slave_np, *data_np = NULL;
 	u32 fb_delay = 0;
 
 	slave_np = spi->dev.of_node;
@@ -1479,40 +1479,40 @@
 			   s3c64xx_spi_runtime_resume, NULL)
 };
 
-struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
 	.fifo_lvl_mask	= { 0x7f },
 	.rx_lvl_offset	= 13,
 	.tx_st_done	= 21,
 	.high_speed	= true,
 };
 
-struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
 	.fifo_lvl_mask	= { 0x7f, 0x7F },
 	.rx_lvl_offset	= 13,
 	.tx_st_done	= 21,
 };
 
-struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
+static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
 	.fifo_lvl_mask	= { 0x1ff, 0x7F },
 	.rx_lvl_offset	= 15,
 	.tx_st_done	= 25,
 };
 
-struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
+static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
 	.fifo_lvl_mask	= { 0x7f, 0x7F },
 	.rx_lvl_offset	= 13,
 	.tx_st_done	= 21,
 	.high_speed	= true,
 };
 
-struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
 	.fifo_lvl_mask	= { 0x1ff, 0x7F },
 	.rx_lvl_offset	= 15,
 	.tx_st_done	= 25,
 	.high_speed	= true,
 };
 
-struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
 	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
 	.rx_lvl_offset	= 15,
 	.tx_st_done	= 25,
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 7e2ddc04..c625086 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -190,16 +190,30 @@
 {
 	struct ssb_bus *bus = mcore->dev->bus;
 
-	mcore->flash_buswidth = 2;
-	if (bus->chipco.dev) {
-		mcore->flash_window = 0x1c000000;
-		mcore->flash_window_size = 0x02000000;
+	/* When there is no ChipCommon on the bus, there is a 4MB flash */
+	if (!bus->chipco.dev) {
+		mcore->flash_buswidth = 2;
+		mcore->flash_window = SSB_FLASH1;
+		mcore->flash_window_size = SSB_FLASH1_SZ;
+		return;
+	}
+
+	/* There is ChipCommon, so use it to read info about flash */
+	switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
+	case SSB_CHIPCO_FLASHT_STSER:
+	case SSB_CHIPCO_FLASHT_ATSER:
+		pr_err("Serial flash not supported\n");
+		break;
+	case SSB_CHIPCO_FLASHT_PARA:
+		pr_debug("Found parallel flash\n");
+		mcore->flash_window = SSB_FLASH2;
+		mcore->flash_window_size = SSB_FLASH2_SZ;
 		if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
 		               & SSB_CHIPCO_CFG_DS16) == 0)
 			mcore->flash_buswidth = 1;
-	} else {
-		mcore->flash_window = 0x1fc00000;
-		mcore->flash_window_size = 0x00400000;
+		else
+			mcore->flash_buswidth = 2;
+		break;
 	}
 }
 
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index c0fdb00..2359151 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -168,7 +168,7 @@
 			dev->board_ptr = comedi_recognize(driv, it->board_name);
 			if (dev->board_ptr)
 				break;
-		} else if (strcmp(driv->driver_name, it->board_name))
+		} else if (strcmp(driv->driver_name, it->board_name) == 0)
 			break;
 		module_put(driv->module);
 	}
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 3198660..6b4d0d68 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1349,9 +1349,6 @@
 		}
 		if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)
 			continue;
-		if (pci_is_enabled(pcidev))
-			continue;
-
 		if (strcmp(this_board->name, DRV_NAME) == 0) {
 			for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
 				if (pcidev->device == boardtypes[i].device_id) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index da5ee69..dfde0f6 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -301,8 +301,6 @@
 		}
 		if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)
 			continue;
-		if (pci_is_enabled(pcidev))
-			continue;
 		return pcidev;
 	}
 	dev_err(dev->class_dev,
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 97f06dc..2d4cb7f 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1064,8 +1064,6 @@
 			    slot != PCI_SLOT(pcidev->devfn))
 				continue;
 		}
-		if (pci_is_enabled(pcidev))
-			continue;
 		for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
 			if (boardtypes[i].vendor_id != pcidev->vendor)
 				continue;
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index ef28385..cad559a 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -718,7 +718,8 @@
 				continue;
 		}
 		if (pcidev->vendor != PCI_VENDOR_ID_IOTECH ||
-		    pcidev->device != 0x0409)
+		    pcidev->device != 0x0409 ||
+		    pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
 			continue;
 
 		for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
@@ -739,6 +740,7 @@
 {
 	struct pci_dev *pcidev;
 	struct comedi_subdevice *s;
+	resource_size_t pci_base;
 	void *aux_data;
 	unsigned int aux_len;
 	int result;
@@ -758,11 +760,12 @@
 			"failed to enable PCI device and request regions\n");
 		return -EIO;
 	}
-	dev->iobase = pci_resource_start(pcidev, 2);
+	dev->iobase = 1;	/* the "detach" needs this */
 
-	devpriv->plx =
-	    ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE);
-	devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE);
+	pci_base = pci_resource_start(pcidev, 0);
+	devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE);
+	pci_base = pci_resource_start(pcidev, 2);
+	devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE);
 	if (!devpriv->plx || !devpriv->daq)
 		return -ENOMEM;
 
@@ -799,8 +802,6 @@
 	   printk("Interrupt after is: %x\n", interrupt);
 	 */
 
-	dev->iobase = (unsigned long)devpriv->daq;
-
 	dev->board_name = this_board->name;
 
 	s = dev->subdevices + 0;
@@ -824,7 +825,7 @@
 
 	s = dev->subdevices + 2;
 	result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
-				  (unsigned long)(dev->iobase + 0x40));
+				  (unsigned long)(devpriv->daq + 0x40));
 
 out:
 	return result;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index a6fe6c9..3476cda 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -804,6 +804,7 @@
 {
 	struct pci_dev *pcidev;
 	struct comedi_subdevice *s;
+	resource_size_t pci_base;
 	int ret = 0;
 
 	dev_dbg(dev->class_dev, "dt3000:\n");
@@ -820,9 +821,10 @@
 	ret = comedi_pci_enable(pcidev, "dt3000");
 	if (ret < 0)
 		return ret;
+	dev->iobase = 1;	/* the "detach" needs this */
 
-	dev->iobase = pci_resource_start(pcidev, 0);
-	devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE);
+	pci_base  = pci_resource_start(pcidev, 0);
+	devpriv->io_addr = ioremap(pci_base, DT3000_SIZE);
 	if (!devpriv->io_addr)
 		return -ENOMEM;
 
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 112fdc3..5aa8be1 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -1619,9 +1619,8 @@
 	struct rtdPrivate *devpriv;
 	struct pci_dev *pcidev;
 	struct comedi_subdevice *s;
+	resource_size_t pci_base;
 	int ret;
-	resource_size_t physLas1;	/* data area */
-	resource_size_t physLcfg;	/* PLX9080 */
 #ifdef USE_DMA
 	int index;
 #endif
@@ -1655,20 +1654,15 @@
 		printk(KERN_INFO "Failed to enable PCI device and request regions.\n");
 		return ret;
 	}
+	dev->iobase = 1;	/* the "detach" needs this */
 
-	/*
-	 * Initialize base addresses
-	 */
-	/* Get the physical address from PCI config */
-	dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX);
-	physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX);
-	physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX);
-	/* Now have the kernel map this into memory */
-	/* ASSUME page aligned */
-	devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE);
-	devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE);
-	devpriv->lcfg = ioremap_nocache(physLcfg, LCFG_PCISIZE);
-
+	/* Initialize the base addresses */
+	pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX);
+	devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE);
+	pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX);
+	devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE);
+	pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX);
+	devpriv->lcfg = ioremap_nocache(pci_base, LCFG_PCISIZE);
 	if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg)
 		return -ENOMEM;
 
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 848c7ec..11ee836 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -102,6 +102,7 @@
 #define BULK_TIMEOUT 1000
 
 /* constants for "firmware" upload and download */
+#define FIRMWARE "usbdux_firmware.bin"
 #define USBDUXSUB_FIRMWARE 0xA0
 #define VENDOR_DIR_IN  0xC0
 #define VENDOR_DIR_OUT 0x40
@@ -2791,7 +2792,7 @@
 
 	ret = request_firmware_nowait(THIS_MODULE,
 				      FW_ACTION_HOTPLUG,
-				      "usbdux_firmware.bin",
+				      FIRMWARE,
 				      &udev->dev,
 				      GFP_KERNEL,
 				      usbduxsub + index,
@@ -2850,3 +2851,4 @@
 MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
 MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index d991158..8eb41257 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -57,6 +57,7 @@
 /*
  * constants for "firmware" upload and download
  */
+#define FIRMWARE		"usbduxfast_firmware.bin"
 #define USBDUXFASTSUB_FIRMWARE	0xA0
 #define VENDOR_DIR_IN		0xC0
 #define VENDOR_DIR_OUT		0x40
@@ -1706,7 +1707,7 @@
 
 	ret = request_firmware_nowait(THIS_MODULE,
 				      FW_ACTION_HOTPLUG,
-				      "usbduxfast_firmware.bin",
+				      FIRMWARE,
 				      &udev->dev,
 				      GFP_KERNEL,
 				      usbduxfastsub + index,
@@ -1774,3 +1775,4 @@
 MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
 MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 543e604..f54ab8c 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -63,6 +63,7 @@
 #define BULK_TIMEOUT 1000
 
 /* constants for "firmware" upload and download */
+#define FIRMWARE "usbduxsigma_firmware.bin"
 #define USBDUXSUB_FIRMWARE 0xA0
 #define VENDOR_DIR_IN  0xC0
 #define VENDOR_DIR_OUT 0x40
@@ -2780,7 +2781,7 @@
 
 	ret = request_firmware_nowait(THIS_MODULE,
 				      FW_ACTION_HOTPLUG,
-				      "usbduxsigma_firmware.bin",
+				      FIRMWARE,
 				      &udev->dev,
 				      GFP_KERNEL,
 				      usbduxsub + index,
@@ -2845,3 +2846,4 @@
 MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
 MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig
index cee8d48..ad2a109 100644
--- a/drivers/staging/csr/Kconfig
+++ b/drivers/staging/csr/Kconfig
@@ -1,6 +1,6 @@
 config CSR_WIFI
 	tristate "CSR wireless driver"
-	depends on MMC && CFG80211_WEXT
+	depends on MMC && CFG80211_WEXT && INET
 	select WIRELESS_EXT
 	select WEXT_PRIV
 	help
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 22c3923..0958372 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -754,7 +754,7 @@
 		else
 			st->mode &= ~AD7192_MODE_ACX;
 
-		ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode);
+		ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode);
 		break;
 	default:
 		ret = -EINVAL;
@@ -798,6 +798,11 @@
 	.attrs = ad7195_attributes,
 };
 
+static unsigned int ad7192_get_temp_scale(bool unipolar)
+{
+	return unipolar ? 2815 * 2 : 2815;
+}
+
 static int ad7192_read_raw(struct iio_dev *indio_dev,
 			   struct iio_chan_spec const *chan,
 			   int *val,
@@ -824,19 +829,6 @@
 		*val = (smpl >> chan->scan_type.shift) &
 			((1 << (chan->scan_type.realbits)) - 1);
 
-		switch (chan->type) {
-		case IIO_VOLTAGE:
-			if (!unipolar)
-				*val -= (1 << (chan->scan_type.realbits - 1));
-			break;
-		case IIO_TEMP:
-			*val -= 0x800000;
-			*val /= 2815; /* temp Kelvin */
-			*val -= 273; /* temp Celsius */
-			break;
-		default:
-			return -EINVAL;
-		}
 		return IIO_VAL_INT;
 
 	case IIO_CHAN_INFO_SCALE:
@@ -848,11 +840,21 @@
 			mutex_unlock(&indio_dev->mlock);
 			return IIO_VAL_INT_PLUS_NANO;
 		case IIO_TEMP:
-			*val =  1000;
-			return IIO_VAL_INT;
+			*val = 0;
+			*val2 = 1000000000 / ad7192_get_temp_scale(unipolar);
+			return IIO_VAL_INT_PLUS_NANO;
 		default:
 			return -EINVAL;
 		}
+	case IIO_CHAN_INFO_OFFSET:
+		if (!unipolar)
+			*val = -(1 << (chan->scan_type.realbits - 1));
+		else
+			*val = 0;
+		/* Kelvin to Celsius */
+		if (chan->type == IIO_TEMP)
+			*val -= 273 * ad7192_get_temp_scale(unipolar);
+		return IIO_VAL_INT;
 	}
 
 	return -EINVAL;
@@ -890,7 +892,7 @@
 				}
 				ret = 0;
 			}
-
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -942,20 +944,22 @@
 	  .channel = _chan,						\
 	  .channel2 = _chan2,						\
 	  .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |			\
-	  IIO_CHAN_INFO_SCALE_SHARED_BIT,				\
+	  IIO_CHAN_INFO_SCALE_SHARED_BIT |				\
+	  IIO_CHAN_INFO_OFFSET_SHARED_BIT,				\
 	  .address = _address,						\
 	  .scan_index = _si,						\
-	  .scan_type =  IIO_ST('s', 24, 32, 0)}
+	  .scan_type =  IIO_ST('u', 24, 32, 0)}
 
 #define AD7192_CHAN(_chan, _address, _si)				\
 	{ .type = IIO_VOLTAGE,						\
 	  .indexed = 1,							\
 	  .channel = _chan,						\
 	  .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |			\
-	  IIO_CHAN_INFO_SCALE_SHARED_BIT,				\
+	  IIO_CHAN_INFO_SCALE_SHARED_BIT |				\
+	  IIO_CHAN_INFO_OFFSET_SHARED_BIT,				\
 	  .address = _address,						\
 	  .scan_index = _si,						\
-	  .scan_type =  IIO_ST('s', 24, 32, 0)}
+	  .scan_type =  IIO_ST('u', 24, 32, 0)}
 
 #define AD7192_CHAN_TEMP(_chan, _address, _si)				\
 	{ .type = IIO_TEMP,						\
@@ -965,7 +969,7 @@
 	  IIO_CHAN_INFO_SCALE_SEPARATE_BIT,				\
 	  .address = _address,						\
 	  .scan_index = _si,						\
-	  .scan_type =  IIO_ST('s', 24, 32, 0)}
+	  .scan_type =  IIO_ST('u', 24, 32, 0)}
 
 static struct iio_chan_spec ad7192_channels[] = {
 	AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0),
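
The ad7192 rework replaces the inline Kelvin-to-Celsius conversion with standard IIO scale/offset reporting; assuming userspace combines them as value = (raw + offset) * scale, the bipolar temperature channel reads the same as before:

	/* bipolar: offset = -(1 << 23) - 273 * 2815, scale = 1/2815
	 * (reported as 0 + 1000000000/2815 in INT_PLUS_NANO form)
	 *
	 *   (raw + offset) * scale = (raw - (1 << 23)) / 2815 - 273
	 *
	 * which is exactly the conversion deleted from the IIO_CHAN_INFO_RAW
	 * branch above. The ad7793 hunks further down follow a similar scheme. */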
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index fd1d855..506016f 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -76,7 +76,7 @@
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct ad7298_state *st = iio_priv(indio_dev);
 	struct iio_buffer *ring = indio_dev->buffer;
-	s64 time_ns;
+	s64 time_ns = 0;
 	__u16 buf[16];
 	int b_sent, i;
 
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1ece2ac..19ee49c 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -131,9 +131,10 @@
 			.indexed = 1,
 			.channel = 0,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_type = {
-				.sign = 's',
+				.sign = 'u',
 				.realbits = 24,
 				.storagebits = 32,
 				.shift = 8,
@@ -146,9 +147,10 @@
 			.indexed = 1,
 			.channel = 0,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_type = {
-				.sign = 's',
+				.sign = 'u',
 				.realbits = 20,
 				.storagebits = 32,
 				.shift = 12,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 76fdd71..112e2b7 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -563,8 +563,9 @@
 	return len;
 }
 
-static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available,
-			     S_IRUGO, ad7793_show_scale_available, NULL, 0);
+static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
+		in_voltage-voltage_scale_available, S_IRUGO,
+		ad7793_show_scale_available, NULL, 0);
 
 static struct attribute *ad7793_attributes[] = {
 	&iio_dev_attr_sampling_frequency.dev_attr.attr,
@@ -604,9 +605,6 @@
 		*val = (smpl >> chan->scan_type.shift) &
 			((1 << (chan->scan_type.realbits)) - 1);
 
-		if (!unipolar)
-			*val -= (1 << (chan->scan_type.realbits - 1));
-
 		return IIO_VAL_INT;
 
 	case IIO_CHAN_INFO_SCALE:
@@ -620,25 +618,38 @@
 				return IIO_VAL_INT_PLUS_NANO;
 			} else {
 				/* 1170mV / 2^23 * 6 */
-				scale_uv = (1170ULL * 100000000ULL * 6ULL)
-					>> (chan->scan_type.realbits -
-					    (unipolar ? 0 : 1));
+				scale_uv = (1170ULL * 100000000ULL * 6ULL);
 			}
 			break;
 		case IIO_TEMP:
-			/* Always uses unity gain and internal ref */
-			scale_uv = (2500ULL * 100000000ULL)
-				>> (chan->scan_type.realbits -
-				(unipolar ? 0 : 1));
+				/* 1170mV / 0.81 mV/C / 2^23 */
+				scale_uv = 1444444444444ULL;
 			break;
 		default:
 			return -EINVAL;
 		}
 
-		*val2 = do_div(scale_uv, 100000000) * 10;
-		*val =  scale_uv;
-
+		scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1));
+		*val = 0;
+		*val2 = scale_uv;
 		return IIO_VAL_INT_PLUS_NANO;
+	case IIO_CHAN_INFO_OFFSET:
+		if (!unipolar)
+			*val = -(1 << (chan->scan_type.realbits - 1));
+		else
+			*val = 0;
+
+		/* Kelvin to Celsius */
+		if (chan->type == IIO_TEMP) {
+			unsigned long long offset;
+			unsigned int shift;
+
+			shift = chan->scan_type.realbits - (unipolar ? 0 : 1);
+			offset = 273ULL << shift;
+			do_div(offset, 1444);
+			*val -= offset;
+		}
+		return IIO_VAL_INT;
 	}
 	return -EINVAL;
 }
@@ -676,7 +687,7 @@
 				}
 				ret = 0;
 			}
-
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -720,9 +731,10 @@
 			.channel2 = 0,
 			.address = AD7793_CH_AIN1P_AIN1M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 0,
-			.scan_type = IIO_ST('s', 24, 32, 0)
+			.scan_type = IIO_ST('u', 24, 32, 0)
 		},
 		.channel[1] = {
 			.type = IIO_VOLTAGE,
@@ -732,9 +744,10 @@
 			.channel2 = 1,
 			.address = AD7793_CH_AIN2P_AIN2M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 1,
-			.scan_type = IIO_ST('s', 24, 32, 0)
+			.scan_type = IIO_ST('u', 24, 32, 0)
 		},
 		.channel[2] = {
 			.type = IIO_VOLTAGE,
@@ -744,9 +757,10 @@
 			.channel2 = 2,
 			.address = AD7793_CH_AIN3P_AIN3M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 2,
-			.scan_type = IIO_ST('s', 24, 32, 0)
+			.scan_type = IIO_ST('u', 24, 32, 0)
 		},
 		.channel[3] = {
 			.type = IIO_VOLTAGE,
@@ -757,9 +771,10 @@
 			.channel2 = 2,
 			.address = AD7793_CH_AIN1M_AIN1M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 3,
-			.scan_type = IIO_ST('s', 24, 32, 0)
+			.scan_type = IIO_ST('u', 24, 32, 0)
 		},
 		.channel[4] = {
 			.type = IIO_TEMP,
@@ -769,7 +784,7 @@
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
 			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
 			.scan_index = 4,
-			.scan_type = IIO_ST('s', 24, 32, 0),
+			.scan_type = IIO_ST('u', 24, 32, 0),
 		},
 		.channel[5] = {
 			.type = IIO_VOLTAGE,
@@ -778,9 +793,10 @@
 			.channel = 4,
 			.address = AD7793_CH_AVDD_MONITOR,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+			IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 5,
-			.scan_type = IIO_ST('s', 24, 32, 0),
+			.scan_type = IIO_ST('u', 24, 32, 0),
 		},
 		.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
 	},
@@ -793,9 +809,10 @@
 			.channel2 = 0,
 			.address = AD7793_CH_AIN1P_AIN1M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 0,
-			.scan_type = IIO_ST('s', 16, 32, 0)
+			.scan_type = IIO_ST('u', 16, 32, 0)
 		},
 		.channel[1] = {
 			.type = IIO_VOLTAGE,
@@ -805,9 +822,10 @@
 			.channel2 = 1,
 			.address = AD7793_CH_AIN2P_AIN2M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 1,
-			.scan_type = IIO_ST('s', 16, 32, 0)
+			.scan_type = IIO_ST('u', 16, 32, 0)
 		},
 		.channel[2] = {
 			.type = IIO_VOLTAGE,
@@ -817,9 +835,10 @@
 			.channel2 = 2,
 			.address = AD7793_CH_AIN3P_AIN3M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 2,
-			.scan_type = IIO_ST('s', 16, 32, 0)
+			.scan_type = IIO_ST('u', 16, 32, 0)
 		},
 		.channel[3] = {
 			.type = IIO_VOLTAGE,
@@ -830,9 +849,10 @@
 			.channel2 = 2,
 			.address = AD7793_CH_AIN1M_AIN1M,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SHARED_BIT,
+			IIO_CHAN_INFO_SCALE_SHARED_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 3,
-			.scan_type = IIO_ST('s', 16, 32, 0)
+			.scan_type = IIO_ST('u', 16, 32, 0)
 		},
 		.channel[4] = {
 			.type = IIO_TEMP,
@@ -842,7 +862,7 @@
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
 			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
 			.scan_index = 4,
-			.scan_type = IIO_ST('s', 16, 32, 0),
+			.scan_type = IIO_ST('u', 16, 32, 0),
 		},
 		.channel[5] = {
 			.type = IIO_VOLTAGE,
@@ -851,9 +871,10 @@
 			.channel = 4,
 			.address = AD7793_CH_AVDD_MONITOR,
 			.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
-			IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+			IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+			IIO_CHAN_INFO_OFFSET_SHARED_BIT,
 			.scan_index = 5,
-			.scan_type = IIO_ST('s', 16, 32, 0),
+			.scan_type = IIO_ST('u', 16, 32, 0),
 		},
 		.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
 	},
@@ -901,7 +922,7 @@
 	else if (voltage_uv)
 		st->int_vref_mv = voltage_uv / 1000;
 	else
-		st->int_vref_mv = 2500; /* Build-in ref */
+		st->int_vref_mv = 1170; /* Built-in ref */
 
 	spi_set_drvdata(spi, indio_dev);
 	st->spi = spi;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index b06fd5b..d536756 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -189,7 +189,7 @@
 // Static vars definitions
 //
 
-static struct usb_device_id vt6656_table[] __devinitdata = {
+static struct usb_device_id vt6656_table[] = {
 	{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
 	{}
 };
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index ef36054..48aa136 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -25,7 +25,7 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
-static const struct usb_device_id wb35_table[] __devinitconst = {
+static const struct usb_device_id wb35_table[] = {
 	{ USB_DEVICE(0x0416, 0x0035) },
 	{ USB_DEVICE(0x18E8, 0x6201) },
 	{ USB_DEVICE(0x18E8, 0x6206) },
@@ -119,7 +119,9 @@
 	*total_flags = new_flags;
 }
 
-static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
 {
 	struct wbsoft_priv *priv = dev->priv;
 
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 070b442..4720b4b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -160,10 +160,12 @@
 
 config SERIAL_CLPS711X
 	tristate "CLPS711X serial port support"
-	depends on ARM && ARCH_CLPS711X
+	depends on ARCH_CLPS711X
 	select SERIAL_CORE
+	default y
 	help
-	  ::: To be written :::
+	  This enables the driver for the on-chip UARTs of the Cirrus
+	  Logic EP711x/EP721x/EP731x processors.
 
 config SERIAL_CLPS711X_CONSOLE
 	bool "Support for console on CLPS711X serial port"
@@ -173,9 +175,7 @@
 	  Even if you say Y here, the currently visible virtual console
 	  (/dev/tty0) will still be used as the system console by default, but
 	  you can alter that using a kernel command line option such as
-	  "console=ttyCL1". (Try "man bootparam" or see the documentation of
-	  your boot loader (lilo or loadlin) about how to pass options to the
-	  kernel at boot time.)
+	  "console=ttyCL1".
 
 config SERIAL_SAMSUNG
 	tristate "Samsung SoC serial support"
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 144cd39..3ad079f 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1331,7 +1331,7 @@
 MODULE_DEVICE_TABLE(spi, ifx_id_table);
 
 /* spi operations */
-static const struct spi_driver ifx_spi_driver = {
+static struct spi_driver ifx_spi_driver = {
 	.driver = {
 		.name = DRVNAME,
 		.pm = &ifx_spi_pm,
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 2e341b8..3a667ee 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -73,6 +73,7 @@
 #define AUART_CTRL0_CLKGATE			(1 << 30)
 
 #define AUART_CTRL2_CTSEN			(1 << 15)
+#define AUART_CTRL2_RTSEN			(1 << 14)
 #define AUART_CTRL2_RTS				(1 << 11)
 #define AUART_CTRL2_RXE				(1 << 9)
 #define AUART_CTRL2_TXE				(1 << 8)
@@ -259,9 +260,12 @@
 
 	u32 ctrl = readl(u->membase + AUART_CTRL2);
 
-	ctrl &= ~AUART_CTRL2_RTS;
-	if (mctrl & TIOCM_RTS)
-		ctrl |= AUART_CTRL2_RTS;
+	ctrl &= ~AUART_CTRL2_RTSEN;
+	if (mctrl & TIOCM_RTS) {
+		if (u->state->port.flags & ASYNC_CTS_FLOW)
+			ctrl |= AUART_CTRL2_RTSEN;
+	}
+
 	s->ctrl = mctrl;
 	writel(ctrl, u->membase + AUART_CTRL2);
 }
@@ -359,9 +363,9 @@
 
 	/* figure out the hardware flow control settings */
 	if (cflag & CRTSCTS)
-		ctrl2 |= AUART_CTRL2_CTSEN;
+		ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN;
 	else
-		ctrl2 &= ~AUART_CTRL2_CTSEN;
+		ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN);
 
 	/* set baud rate */
 	baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 654755a..333c8d0 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1348,10 +1348,16 @@
 static int pmz_poll_get_char(struct uart_port *port)
 {
 	struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+	int tries = 2;
 
-	while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
-		udelay(5);
-	return read_zsdata(uap);
+	while (tries) {
+		if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
+			return read_zsdata(uap);
+		if (tries--)
+			udelay(5);
+	}
+
+	return NO_POLL_CHAR;
 }
 
 static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index a7773a3..7065df6 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -13,7 +13,7 @@
 	default y if PXA3xx
 	default y if ARCH_EP93XX
 	default y if ARCH_AT91
-	default y if ARCH_PNX4008 && I2C
+	default y if ARCH_PNX4008
 	default y if MFD_TC6393XB
 	default y if ARCH_W90X900
 	default y if ARCH_DAVINCI_DA8XX
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 8337fb5..47e499c 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,9 +1,9 @@
 config USB_CHIPIDEA
 	tristate "ChipIdea Highspeed Dual Role Controller"
-	depends on USB
+	depends on USB || USB_GADGET
 	help
-          Say Y here if your system has a dual role high speed USB
-          controller based on ChipIdea silicon IP. Currently, only the
+	  Say Y here if your system has a dual role high speed USB
+	  controller based on ChipIdea silicon IP. Currently, only the
 	  peripheral mode is supported.
 
 	  When compiled dynamically, the module will be called ci-hdrc.ko.
@@ -12,7 +12,7 @@
 
 config USB_CHIPIDEA_UDC
 	bool "ChipIdea device controller"
-	depends on USB_GADGET
+	depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA
 	select USB_GADGET_DUALSPEED
 	help
 	  Say Y here to enable device controller functionality of the
@@ -20,6 +20,7 @@
 
 config USB_CHIPIDEA_HOST
 	bool "ChipIdea host controller"
+	depends on USB=y || USB=USB_CHIPIDEA
 	select USB_EHCI_ROOT_HUB_TT
 	help
 	  Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 56d6bf6..f763ed7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1104,7 +1104,8 @@
 	}
 
 
-	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
+	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
+	    control_interface->cur_altsetting->desc.bNumEndpoints == 0)
 		return -EINVAL;
 
 	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebac..89dcf15 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@
 	writel(FLAG_CF, &ehci_regs->configured_flag);
 
 	/* Wait until the controller is no longer halted */
-	loop = 10;
+	loop = 1000;
 	do {
 		status = readl(&ehci_regs->status);
 		if (!(status & STS_HALT))
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 90e82e2..0e52309 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -669,6 +669,8 @@
 	spin_lock_irqsave(&dev->lock, flags);
 	if (dev->port_usb) {
 		struct gether	*link = dev->port_usb;
+		const struct usb_endpoint_descriptor *in;
+		const struct usb_endpoint_descriptor *out;
 
 		if (link->close)
 			link->close(link);
@@ -682,10 +684,14 @@
 		 * their own pace; the network stack can handle old packets.
 		 * For the moment we leave this here, since it works.
 		 */
+		in = link->in_ep->desc;
+		out = link->out_ep->desc;
 		usb_ep_disable(link->in_ep);
 		usb_ep_disable(link->out_ep);
 		if (netif_carrier_ok(net)) {
 			DBG(dev, "host still using in/out endpoints\n");
+			link->in_ep->desc = in;
+			link->out_ep->desc = out;
 			usb_ep_enable(link->in_ep);
 			usb_ep_enable(link->out_ep);
 		}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index bb55eb4..d7fe287 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -56,15 +56,6 @@
 #define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8
 #define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0
 
-/* Errata i693 */
-static struct clk	*utmi_p1_fck;
-static struct clk	*utmi_p2_fck;
-static struct clk	*xclk60mhsp1_ck;
-static struct clk	*xclk60mhsp2_ck;
-static struct clk	*usbhost_p1_fck;
-static struct clk	*usbhost_p2_fck;
-static struct clk	*init_60m_fclk;
-
 /*-------------------------------------------------------------------------*/
 
 static const struct hc_driver ehci_omap_hc_driver;
@@ -80,40 +71,6 @@
 	return __raw_readl(base + reg);
 }
 
-/* Erratum i693 workaround sequence */
-static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
-{
-	int ret = 0;
-
-	/* Switch to the internal 60 MHz clock */
-	ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
-	if (ret != 0)
-		ehci_err(ehci, "init_60m_fclk set parent"
-			"failed error:%d\n", ret);
-
-	ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
-	if (ret != 0)
-		ehci_err(ehci, "init_60m_fclk set parent"
-			"failed error:%d\n", ret);
-
-	clk_enable(usbhost_p1_fck);
-	clk_enable(usbhost_p2_fck);
-
-	/* Wait 1ms and switch back to the external clock */
-	mdelay(1);
-	ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
-	if (ret != 0)
-		ehci_err(ehci, "xclk60mhsp1_ck set parent"
-			"failed error:%d\n", ret);
-
-	ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
-	if (ret != 0)
-		ehci_err(ehci, "xclk60mhsp2_ck set parent"
-			"failed error:%d\n", ret);
-
-	clk_disable(usbhost_p1_fck);
-	clk_disable(usbhost_p2_fck);
-}
 
 static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port)
 {
@@ -195,50 +152,6 @@
 	return rc;
 }
 
-static int omap_ehci_hub_control(
-	struct usb_hcd	*hcd,
-	u16		typeReq,
-	u16		wValue,
-	u16		wIndex,
-	char		*buf,
-	u16		wLength
-)
-{
-	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
-	u32 __iomem *status_reg = &ehci->regs->port_status[
-				(wIndex & 0xff) - 1];
-	u32		temp;
-	unsigned long	flags;
-	int		retval = 0;
-
-	spin_lock_irqsave(&ehci->lock, flags);
-
-	if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
-		temp = ehci_readl(ehci, status_reg);
-		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
-			retval = -EPIPE;
-			goto done;
-		}
-
-		temp &= ~PORT_WKCONN_E;
-		temp |= PORT_WKDISC_E | PORT_WKOC_E;
-		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
-
-		omap_ehci_erratum_i693(ehci);
-
-		set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
-		goto done;
-	}
-
-	spin_unlock_irqrestore(&ehci->lock, flags);
-
-	/* Handle the hub control events here */
-	return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-done:
-	spin_unlock_irqrestore(&ehci->lock, flags);
-	return retval;
-}
-
 static void disable_put_regulator(
 		struct ehci_hcd_omap_platform_data *pdata)
 {
@@ -351,79 +264,9 @@
 		goto err_pm_runtime;
 	}
 
-	/* get clocks */
-	utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
-	if (IS_ERR(utmi_p1_fck)) {
-		ret = PTR_ERR(utmi_p1_fck);
-		dev_err(dev, "utmi_p1_gfclk failed error:%d\n",	ret);
-		goto err_add_hcd;
-	}
-
-	xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
-	if (IS_ERR(xclk60mhsp1_ck)) {
-		ret = PTR_ERR(xclk60mhsp1_ck);
-		dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
-		goto err_utmi_p1_fck;
-	}
-
-	utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
-	if (IS_ERR(utmi_p2_fck)) {
-		ret = PTR_ERR(utmi_p2_fck);
-		dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
-		goto err_xclk60mhsp1_ck;
-	}
-
-	xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
-	if (IS_ERR(xclk60mhsp2_ck)) {
-		ret = PTR_ERR(xclk60mhsp2_ck);
-		dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
-		goto err_utmi_p2_fck;
-	}
-
-	usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
-	if (IS_ERR(usbhost_p1_fck)) {
-		ret = PTR_ERR(usbhost_p1_fck);
-		dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
-		goto err_xclk60mhsp2_ck;
-	}
-
-	usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
-	if (IS_ERR(usbhost_p2_fck)) {
-		ret = PTR_ERR(usbhost_p2_fck);
-		dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
-		goto err_usbhost_p1_fck;
-	}
-
-	init_60m_fclk = clk_get(dev, "init_60m_fclk");
-	if (IS_ERR(init_60m_fclk)) {
-		ret = PTR_ERR(init_60m_fclk);
-		dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
-		goto err_usbhost_p2_fck;
-	}
 
 	return 0;
 
-err_usbhost_p2_fck:
-	clk_put(usbhost_p2_fck);
-
-err_usbhost_p1_fck:
-	clk_put(usbhost_p1_fck);
-
-err_xclk60mhsp2_ck:
-	clk_put(xclk60mhsp2_ck);
-
-err_utmi_p2_fck:
-	clk_put(utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
-	clk_put(xclk60mhsp1_ck);
-
-err_utmi_p1_fck:
-	clk_put(utmi_p1_fck);
-
-err_add_hcd:
-	usb_remove_hcd(hcd);
-
 err_pm_runtime:
 	disable_put_regulator(pdata);
 	pm_runtime_put_sync(dev);
@@ -454,14 +297,6 @@
 	iounmap(hcd->regs);
 	usb_put_hcd(hcd);
 
-	clk_put(utmi_p1_fck);
-	clk_put(utmi_p2_fck);
-	clk_put(xclk60mhsp1_ck);
-	clk_put(xclk60mhsp2_ck);
-	clk_put(usbhost_p1_fck);
-	clk_put(usbhost_p2_fck);
-	clk_put(init_60m_fclk);
-
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 
@@ -532,7 +367,7 @@
 	 * root hub support
 	 */
 	.hub_status_data	= ehci_hub_status_data,
-	.hub_control		= omap_ehci_hub_control,
+	.hub_control		= ehci_hub_control,
 	.bus_suspend		= ehci_bus_suspend,
 	.bus_resume		= ehci_bus_resume,
 
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 58c96bd..0c9e43c 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -40,7 +40,7 @@
 	ehci->need_io_watchdog = 0;
 
 	/* Set burst length to 16 words. */
-	ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]);
+	ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
 
 	return ret;
 }
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 950e95e..26dedb3 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -799,11 +799,12 @@
 #endif
 
 	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
 
 	tegra_usb_phy_close(tegra->phy);
 	iounmap(hcd->regs);
 
+	usb_put_hcd(hcd);
+
 	clk_disable_unprepare(tegra->clk);
 	clk_put(tegra->clk);
 
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 2ed112d..2563263 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -543,12 +543,12 @@
 			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
 			    short_ok ? "" : "not_",
 			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
+			/* save the data underrun error code for later and
+			 * proceed with the status stage
+			 */
+			urb->actual_length += PTD_GET_COUNT(ptd);
 			if (usb_pipecontrol(urb->pipe)) {
 				ep->nextpid = USB_PID_ACK;
-				/* save the data underrun error code for later and
-				 * proceed with the status stage
-				 */
-				urb->actual_length += PTD_GET_COUNT(ptd);
 				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 
 				if (urb->status == -EINPROGRESS)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index e7d75d2..f8b2d91 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -403,8 +403,6 @@
 static inline void
 usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
 {
-	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
-
 	usb_remove_hcd(hcd);
 	if (!IS_ERR_OR_NULL(hcd->phy)) {
 		(void) otg_set_host(hcd->phy->otg, 0);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index df0828c..c5e9e4a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -800,6 +800,13 @@
 }
 EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
 
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+
 /**
  * PCI Quirks for xHCI.
  *
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index b1002a8..ef004a5 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -10,6 +10,7 @@
 void usb_amd_quirk_pll_enable(void);
 bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
 void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 18b231b..9bfd4ca11 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -94,11 +94,21 @@
 		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
 		xhci->limit_active_eps = 64;
 		xhci->quirks |= XHCI_SW_BW_CHECKING;
+		/*
+		 * PPT desktop boards DH77EB and DH77DF will power back on after
+		 * a few seconds of being shut down.  The fix for this is to
+		 * switch the ports from xHCI to EHCI on shutdown.  We can't use
+		 * DMI information to find those particular boards (since each
+		 * vendor will change the board name), so we have to key off all
+		 * PPT chipsets.
+		 */
+		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
 		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
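
The comment above explains why XHCI_SPURIOUS_REBOOT is keyed off every Panther Point chipset rather than specific boards. For orientation, the flag set here is consumed at shutdown by the xhci.c and pci-quirks.c hunks later in this patch; a condensed sketch of that path (illustrative only, not additional patch content):

	/* xhci->quirks is a bitmask, so the flag is tested with '&' */
	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
	/*
	 * usb_disable_xhci_ports() zeroes USB_INTEL_USB3_PSSEN and
	 * USB_INTEL_XUSB2PR, handing the ports back to EHCI.
	 */
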
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8275645..643c2f3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -145,29 +145,37 @@
  */
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	union xhci_trb *next;
 	unsigned long long addr;
 
 	ring->deq_updates++;
 
-	/* If this is not event ring, there is one more usable TRB */
+	/*
+	 * If this is not an event ring and the dequeue pointer
+	 * is not on a link TRB, there is one more usable TRB
+	 */
 	if (ring->type != TYPE_EVENT &&
 			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
 		ring->num_trbs_free++;
-	next = ++(ring->dequeue);
 
-	/* Update the dequeue pointer further if that was a link TRB or we're at
-	 * the end of an event ring segment (which doesn't have link TRBS)
-	 */
-	while (last_trb(xhci, ring, ring->deq_seg, next)) {
-		if (ring->type == TYPE_EVENT &&	last_trb_on_last_seg(xhci,
-				ring, ring->deq_seg, next)) {
-			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+	do {
+		/*
+		 * Update the dequeue pointer further if that was a link TRB or
+		 * we're at the end of an event ring segment (which doesn't have
+		 * link TRBs)
+		 */
+		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+			if (ring->type == TYPE_EVENT &&
+					last_trb_on_last_seg(xhci, ring,
+						ring->deq_seg, ring->dequeue)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			}
+			ring->deq_seg = ring->deq_seg->next;
+			ring->dequeue = ring->deq_seg->trbs;
+		} else {
+			ring->dequeue++;
 		}
-		ring->deq_seg = ring->deq_seg->next;
-		ring->dequeue = ring->deq_seg->trbs;
-		next = ring->dequeue;
-	}
+	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 }
 
@@ -2073,8 +2081,8 @@
 		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
 			trb_comp_code = COMP_SHORT_TX;
 		else
-			xhci_warn(xhci, "WARN Successful completion on short TX: "
-					"needs XHCI_TRUST_TX_LENGTH quirk?\n");
+			xhci_warn_ratelimited(xhci,
+					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
 	case COMP_SHORT_TX:
 		break;
 	case COMP_STOP:
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7648b2d..c59d5b5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -166,7 +166,7 @@
 	xhci_writel(xhci, command, &xhci->op_regs->command);
 
 	ret = handshake(xhci, &xhci->op_regs->command,
-			CMD_RESET, 0, 250 * 1000);
+			CMD_RESET, 0, 10 * 1000 * 1000);
 	if (ret)
 		return ret;
 
@@ -175,7 +175,8 @@
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
-	ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+	ret = handshake(xhci, &xhci->op_regs->status,
+			STS_CNR, 0, 10 * 1000 * 1000);
 
 	for (i = 0; i < 2; ++i) {
 		xhci->bus_state[i].port_c_suspend = 0;
@@ -658,6 +659,9 @@
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
+	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	spin_unlock_irq(&xhci->lock);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 55c0785..c713256 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1494,6 +1494,7 @@
 #define XHCI_TRUST_TX_LENGTH	(1 << 10)
 #define XHCI_LPM_SUPPORT	(1 << 11)
 #define XHCI_INTEL_HOST		(1 << 12)
+#define XHCI_SPURIOUS_REBOOT	(1 << 13)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
@@ -1537,6 +1538,8 @@
 	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 #define xhci_warn(xhci, fmt, args...) \
 	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn_ratelimited(xhci, fmt, args...) \
+	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 
 /* TODO: copied from ehci.h - can be refactored? */
 /* xHCI spec says all registers are little endian */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index ff08015..ae794b9 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -232,7 +232,7 @@
 	return err;
 }
 
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
 	{ }                                             /* Terminating entry */
 };
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index ef0c3f9..6259f0d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -8,7 +8,7 @@
 	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	depends on USB && USB_GADGET
 	select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
-	select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX)
+	select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX)
 	select TWL4030_USB if MACH_OMAP_3430SDP
 	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
 	select USB_OTG_UTILS
@@ -57,7 +57,7 @@
 
 config USB_MUSB_DSPS
 	tristate "TI DSPS platforms"
-	depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX
+	depends on SOC_TI81XX || SOC_AM33XX
 
 config USB_MUSB_BLACKFIN
 	tristate "Blackfin"
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 217808d..494772f 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -479,9 +479,9 @@
 		ret = -ENODEV;
 		goto err0;
 	}
-	strcpy((u8 *)res->name, "mc");
 	res->parent = NULL;
 	resources[1] = *res;
+	resources[1].name = "mc";
 
 	/* allocate the child platform device */
 	musb = platform_device_alloc("musb-hdrc", -1);
@@ -566,27 +566,28 @@
 	}
 	platform_set_drvdata(pdev, glue);
 
-	/* create the child platform device for first instances of musb */
-	ret = dsps_create_musb_pdev(glue, 0);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "failed to create child pdev\n");
-		goto err2;
-	}
-
 	/* enable the usbss clocks */
 	pm_runtime_enable(&pdev->dev);
 
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
+		goto err2;
+	}
+
+	/* create the child platform device for first instances of musb */
+	ret = dsps_create_musb_pdev(glue, 0);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "failed to create child pdev\n");
 		goto err3;
 	}
 
 	return 0;
 
 err3:
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
 err2:
+	pm_runtime_disable(&pdev->dev);
 	kfree(glue->wrp);
 err1:
 	kfree(glue);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8c9bb1a..681da06 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -603,12 +603,12 @@
 	struct usbhs_priv *priv = dev_get_drvdata(dev);
 	struct platform_device *pdev = usbhs_priv_to_pdev(priv);
 
-	usbhs_platform_call(priv, phy_reset, pdev);
-
 	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
 		usbhsc_power_ctrl(priv, 1);
 
-	usbhsc_hotplug(priv);
+	usbhs_platform_call(priv, phy_reset, pdev);
+
+	usbhsc_drvcllbck_notify_hotplug(pdev);
 
 	return 0;
 }
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1834cf5..9b69a13 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1266,6 +1266,12 @@
 	return ret;
 }
 
+static int usbhsh_bus_nop(struct usb_hcd *hcd)
+{
+	/* nothing to do */
+	return 0;
+}
+
 static struct hc_driver usbhsh_driver = {
 	.description =		usbhsh_hcd_name,
 	.hcd_priv_size =	sizeof(struct usbhsh_hpriv),
@@ -1290,6 +1296,8 @@
 	 */
 	.hub_status_data =	usbhsh_hub_status_data,
 	.hub_control =		usbhsh_hub_control,
+	.bus_suspend =		usbhsh_bus_nop,
+	.bus_resume =		usbhsh_bus_nop,
 };
 
 /*
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index f398d1e..c15f2e7 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -61,18 +61,23 @@
 		goto exit;
 	}
 
+	/* make sure suspend/resume doesn't race against port_probe */
+	retval = usb_autopm_get_interface(port->serial->interface);
+	if (retval)
+		goto exit;
+
 	driver = port->serial->type;
 	if (driver->port_probe) {
 		retval = driver->port_probe(port);
 		if (retval)
-			goto exit;
+			goto exit_with_autopm;
 	}
 
 	retval = device_create_file(dev, &dev_attr_port_number);
 	if (retval) {
 		if (driver->port_remove)
 			retval = driver->port_remove(port);
-		goto exit;
+		goto exit_with_autopm;
 	}
 
 	minor = port->number;
@@ -81,6 +86,8 @@
 		 "%s converter now attached to ttyUSB%d\n",
 		 driver->description, minor);
 
+exit_with_autopm:
+	usb_autopm_put_interface(port->serial->interface);
 exit:
 	return retval;
 }
@@ -96,6 +103,9 @@
 	if (!port)
 		return -ENODEV;
 
+	/* make sure suspend/resume doesn't race against port_remove */
+	usb_autopm_get_interface(port->serial->interface);
+
 	device_remove_file(&port->dev, &dev_attr_port_number);
 
 	driver = port->serial->type;
@@ -107,6 +117,7 @@
 	dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
 		 driver->description, minor);
 
+	usb_autopm_put_interface(port->serial->interface);
 	return retval;
 }
 
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bc912e5..5620db6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -811,6 +811,7 @@
 	{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
 	{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
 	{ USB_DEVICE(PI_VID, PI_E861_PID) },
+	{ USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
 	{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
 	{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5661c7e..5dd96ca 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -795,6 +795,13 @@
 #define PI_E861_PID         0x1008  /* E-861 piezo controller USB connection */
 
 /*
+ * Kondo Kagaku Co. Ltd.
+ * http://www.kondo-robot.com/EN
+ */
+#define KONDO_VID		0x165c
+#define KONDO_USB_SERIAL_PID	0x0002
+
+/*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
  * http://winglucofacts.com/cables/
  */
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 5811d34..2cb30c5 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -227,7 +227,6 @@
 {
 	struct usb_wwan_intf_private *data = usb_get_serial_data(serial);
 
-	usb_wwan_release(serial);
 	usb_set_serial_data(serial, NULL);
 	kfree(data);
 }
@@ -309,12 +308,12 @@
 	.description =		"IPWireless converter",
 	.id_table =		id_table,
 	.num_ports =		1,
-	.disconnect =		usb_wwan_disconnect,
 	.open =			ipw_open,
 	.close =		ipw_close,
 	.probe =		ipw_probe,
 	.attach =		usb_wwan_startup,
 	.release =		ipw_release,
+	.port_remove =		usb_wwan_port_remove,
 	.dtr_rts =		ipw_dtr_rts,
 	.write =		usb_wwan_write,
 };
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 57eca24..2f6da1e 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -82,8 +82,7 @@
  * Defines used for sending commands to port
  */
 
-#define WAIT_FOR_EVER   (HZ * 0)	/* timeout urb is wait for ever */
-#define MOS_WDR_TIMEOUT (HZ * 5)	/* default urb timeout */
+#define MOS_WDR_TIMEOUT		5000	/* default urb timeout */
 
 #define MOS_PORT1       0x0200
 #define MOS_PORT2       0x0300
@@ -1232,9 +1231,12 @@
 		return 0;
 
 	spin_lock_irqsave(&mos7840_port->pool_lock, flags);
-	for (i = 0; i < NUM_URBS; ++i)
-		if (mos7840_port->busy[i])
-			chars += URB_TRANSFER_BUFFER_SIZE;
+	for (i = 0; i < NUM_URBS; ++i) {
+		if (mos7840_port->busy[i]) {
+			struct urb *urb = mos7840_port->write_urb_pool[i];
+			chars += urb->transfer_buffer_length;
+		}
+	}
 	spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
 	dbg("%s - returns %d", __func__, chars);
 	return chars;
@@ -1344,7 +1346,7 @@
 static void mos7840_block_until_chase_response(struct tty_struct *tty,
 					struct moschip_port *mos7840_port)
 {
-	int timeout = 1 * HZ;
+	int timeout = msecs_to_jiffies(1000);
 	int wait = 10;
 	int count;
 
@@ -2672,7 +2674,7 @@
 
 	/* setting configuration feature to one */
 	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
-			(__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ);
+			(__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT);
 	return 0;
 error:
 	for (/* nothing */; i >= 0; i--) {
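
The timeout changes above follow from the units each consumer expects: usb_control_msg() takes milliseconds directly, while wait loops compare against jiffies and therefore need msecs_to_jiffies(). A minimal sketch of the two conventions (hypothetical helper, not part of the patch):

	static void example_timeout_units(struct usb_device *dev)
	{
		int timeout = msecs_to_jiffies(1000);	/* jiffies, for wait loops */

		/* milliseconds, as now encoded directly in MOS_WDR_TIMEOUT */
		usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x03, 0x00, 0x01,
				0x00, NULL, 0x00, MOS_WDR_TIMEOUT);
		(void)timeout;
	}
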
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 08ff9b8..cc40f47 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -80,85 +80,9 @@
 #define OPTION_PRODUCT_GTM380_MODEM		0x7201
 
 #define HUAWEI_VENDOR_ID			0x12D1
-#define HUAWEI_PRODUCT_E600			0x1001
-#define HUAWEI_PRODUCT_E220			0x1003
-#define HUAWEI_PRODUCT_E220BIS			0x1004
-#define HUAWEI_PRODUCT_E1401			0x1401
-#define HUAWEI_PRODUCT_E1402			0x1402
-#define HUAWEI_PRODUCT_E1403			0x1403
-#define HUAWEI_PRODUCT_E1404			0x1404
-#define HUAWEI_PRODUCT_E1405			0x1405
-#define HUAWEI_PRODUCT_E1406			0x1406
-#define HUAWEI_PRODUCT_E1407			0x1407
-#define HUAWEI_PRODUCT_E1408			0x1408
-#define HUAWEI_PRODUCT_E1409			0x1409
-#define HUAWEI_PRODUCT_E140A			0x140A
-#define HUAWEI_PRODUCT_E140B			0x140B
-#define HUAWEI_PRODUCT_E140C			0x140C
-#define HUAWEI_PRODUCT_E140D			0x140D
-#define HUAWEI_PRODUCT_E140E			0x140E
-#define HUAWEI_PRODUCT_E140F			0x140F
-#define HUAWEI_PRODUCT_E1410			0x1410
-#define HUAWEI_PRODUCT_E1411			0x1411
-#define HUAWEI_PRODUCT_E1412			0x1412
-#define HUAWEI_PRODUCT_E1413			0x1413
-#define HUAWEI_PRODUCT_E1414			0x1414
-#define HUAWEI_PRODUCT_E1415			0x1415
-#define HUAWEI_PRODUCT_E1416			0x1416
-#define HUAWEI_PRODUCT_E1417			0x1417
-#define HUAWEI_PRODUCT_E1418			0x1418
-#define HUAWEI_PRODUCT_E1419			0x1419
-#define HUAWEI_PRODUCT_E141A			0x141A
-#define HUAWEI_PRODUCT_E141B			0x141B
-#define HUAWEI_PRODUCT_E141C			0x141C
-#define HUAWEI_PRODUCT_E141D			0x141D
-#define HUAWEI_PRODUCT_E141E			0x141E
-#define HUAWEI_PRODUCT_E141F			0x141F
-#define HUAWEI_PRODUCT_E1420			0x1420
-#define HUAWEI_PRODUCT_E1421			0x1421
-#define HUAWEI_PRODUCT_E1422			0x1422
-#define HUAWEI_PRODUCT_E1423			0x1423
-#define HUAWEI_PRODUCT_E1424			0x1424
-#define HUAWEI_PRODUCT_E1425			0x1425
-#define HUAWEI_PRODUCT_E1426			0x1426
-#define HUAWEI_PRODUCT_E1427			0x1427
-#define HUAWEI_PRODUCT_E1428			0x1428
-#define HUAWEI_PRODUCT_E1429			0x1429
-#define HUAWEI_PRODUCT_E142A			0x142A
-#define HUAWEI_PRODUCT_E142B			0x142B
-#define HUAWEI_PRODUCT_E142C			0x142C
-#define HUAWEI_PRODUCT_E142D			0x142D
-#define HUAWEI_PRODUCT_E142E			0x142E
-#define HUAWEI_PRODUCT_E142F			0x142F
-#define HUAWEI_PRODUCT_E1430			0x1430
-#define HUAWEI_PRODUCT_E1431			0x1431
-#define HUAWEI_PRODUCT_E1432			0x1432
-#define HUAWEI_PRODUCT_E1433			0x1433
-#define HUAWEI_PRODUCT_E1434			0x1434
-#define HUAWEI_PRODUCT_E1435			0x1435
-#define HUAWEI_PRODUCT_E1436			0x1436
-#define HUAWEI_PRODUCT_E1437			0x1437
-#define HUAWEI_PRODUCT_E1438			0x1438
-#define HUAWEI_PRODUCT_E1439			0x1439
-#define HUAWEI_PRODUCT_E143A			0x143A
-#define HUAWEI_PRODUCT_E143B			0x143B
-#define HUAWEI_PRODUCT_E143C			0x143C
-#define HUAWEI_PRODUCT_E143D			0x143D
-#define HUAWEI_PRODUCT_E143E			0x143E
-#define HUAWEI_PRODUCT_E143F			0x143F
 #define HUAWEI_PRODUCT_K4505			0x1464
 #define HUAWEI_PRODUCT_K3765			0x1465
-#define HUAWEI_PRODUCT_E14AC			0x14AC
-#define HUAWEI_PRODUCT_K3806			0x14AE
 #define HUAWEI_PRODUCT_K4605			0x14C6
-#define HUAWEI_PRODUCT_K5005			0x14C8
-#define HUAWEI_PRODUCT_K3770			0x14C9
-#define HUAWEI_PRODUCT_K3771			0x14CA
-#define HUAWEI_PRODUCT_K4510			0x14CB
-#define HUAWEI_PRODUCT_K4511			0x14CC
-#define HUAWEI_PRODUCT_ETS1220			0x1803
-#define HUAWEI_PRODUCT_E353			0x1506
-#define HUAWEI_PRODUCT_E173S			0x1C05
 
 #define QUANTA_VENDOR_ID			0x0408
 #define QUANTA_PRODUCT_Q101			0xEA02
@@ -615,104 +539,123 @@
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -943,6 +886,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1297,8 +1242,8 @@
 	.tiocmset          = usb_wwan_tiocmset,
 	.ioctl             = usb_wwan_ioctl,
 	.attach            = usb_wwan_startup,
-	.disconnect        = usb_wwan_disconnect,
 	.release           = option_release,
+	.port_remove	   = usb_wwan_port_remove,
 	.read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
 	.suspend           = usb_wwan_suspend,
@@ -1414,8 +1359,6 @@
 	struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
 	struct option_private *priv = intfdata->private;
 
-	usb_wwan_release(serial);
-
 	kfree(priv);
 	kfree(intfdata);
 }
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 8d10301..bfd5077 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -199,43 +199,49 @@
 
 	/* default to enabling interface */
 	altsetting = 0;
-	switch (ifnum) {
-		/* Composite mode; don't bind to the QMI/net interface as that
-		 * gets handled by other drivers.
-		 */
 
+	/* Composite mode; don't bind to the QMI/net interface as that
+	 * gets handled by other drivers.
+	 */
+
+	if (is_gobi1k) {
 		/* Gobi 1K USB layout:
 		 * 0: serial port (doesn't respond)
 		 * 1: serial port (doesn't respond)
 		 * 2: AT-capable modem port
 		 * 3: QMI/net
-		 *
-		 * Gobi 2K+ USB layout:
+		 */
+		if (ifnum == 2)
+			dev_dbg(dev, "Modem port found\n");
+		else
+			altsetting = -1;
+	} else {
+		/* Gobi 2K+ USB layout:
 		 * 0: QMI/net
 		 * 1: DM/DIAG (use libqcdm from ModemManager for communication)
 		 * 2: AT-capable modem port
 		 * 3: NMEA
 		 */
-
-	case 1:
-		if (is_gobi1k)
+		switch (ifnum) {
+		case 0:
+			/* Don't claim the QMI/net interface */
 			altsetting = -1;
-		else
+			break;
+		case 1:
 			dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n");
-		break;
-	case 2:
-		dev_dbg(dev, "Modem port found\n");
-		break;
-	case 3:
-		if (is_gobi1k)
-			altsetting = -1;
-		else
+			break;
+		case 2:
+			dev_dbg(dev, "Modem port found\n");
+			break;
+		case 3:
 			/*
 			 * NMEA (serial line 9600 8N1)
 			 * # echo "\$GPS_START" > /dev/ttyUSBx
 			 * # echo "\$GPS_STOP"  > /dev/ttyUSBx
 			 */
 			dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
+			break;
+		}
 	}
 
 done:
@@ -262,8 +268,7 @@
 {
 	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
 
-	/* Call usb_wwan release & free the private data allocated in qcprobe */
-	usb_wwan_release(serial);
+	/* Free the private data allocated in qcprobe */
 	usb_set_serial_data(serial, NULL);
 	kfree(priv);
 }
@@ -283,8 +288,8 @@
 	.write_room	     = usb_wwan_write_room,
 	.chars_in_buffer     = usb_wwan_chars_in_buffer,
 	.attach		     = usb_wwan_startup,
-	.disconnect	     = usb_wwan_disconnect,
 	.release	     = qc_release,
+	.port_remove	     = usb_wwan_port_remove,
 #ifdef CONFIG_PM
 	.suspend	     = usb_wwan_suspend,
 	.resume		     = usb_wwan_resume,
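
The Gobi 1K and 2K+ layouts spelled out in the comments above can also be read as a table; the arrays below are a hypothetical restatement for clarity (altsetting 0 = claim the interface, -1 = leave it to other drivers) and are not part of the patch:

	static const int gobi1k_ifnum_altsetting[] = {
		-1,	/* 0: serial port (doesn't respond) */
		-1,	/* 1: serial port (doesn't respond) */
		 0,	/* 2: AT-capable modem port */
		-1,	/* 3: QMI/net, handled by other drivers */
	};

	static const int gobi2k_ifnum_altsetting[] = {
		-1,	/* 0: QMI/net, handled by other drivers */
		 0,	/* 1: DM/DIAG */
		 0,	/* 2: AT-capable modem port */
		 0,	/* 3: NMEA */
	};
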
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index c47b6ec..1f034d2 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -9,8 +9,7 @@
 extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
 extern void usb_wwan_close(struct usb_serial_port *port);
 extern int usb_wwan_startup(struct usb_serial *serial);
-extern void usb_wwan_disconnect(struct usb_serial *serial);
-extern void usb_wwan_release(struct usb_serial *serial);
+extern int usb_wwan_port_remove(struct usb_serial_port *port);
 extern int usb_wwan_write_room(struct tty_struct *tty);
 extern void usb_wwan_set_termios(struct tty_struct *tty,
 				 struct usb_serial_port *port,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index f35971d..6855d5e 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -565,6 +565,33 @@
 }
 EXPORT_SYMBOL(usb_wwan_startup);
 
+int usb_wwan_port_remove(struct usb_serial_port *port)
+{
+	int i;
+	struct usb_wwan_port_private *portdata;
+
+	portdata = usb_get_serial_port_data(port);
+	usb_set_serial_port_data(port, NULL);
+
+	/* Stop reading/writing urbs and free them */
+	for (i = 0; i < N_IN_URB; i++) {
+		usb_kill_urb(portdata->in_urbs[i]);
+		usb_free_urb(portdata->in_urbs[i]);
+		free_page((unsigned long)portdata->in_buffer[i]);
+	}
+	for (i = 0; i < N_OUT_URB; i++) {
+		usb_kill_urb(portdata->out_urbs[i]);
+		usb_free_urb(portdata->out_urbs[i]);
+		kfree(portdata->out_buffer[i]);
+	}
+
+	/* Now free port private data */
+	kfree(portdata);
+	return 0;
+}
+EXPORT_SYMBOL(usb_wwan_port_remove);
+
+#ifdef CONFIG_PM
 static void stop_read_write_urbs(struct usb_serial *serial)
 {
 	int i, j;
@@ -575,6 +602,8 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
 		portdata = usb_get_serial_port_data(port);
+		if (!portdata)
+			continue;
 		for (j = 0; j < N_IN_URB; j++)
 			usb_kill_urb(portdata->in_urbs[j]);
 		for (j = 0; j < N_OUT_URB; j++)
@@ -582,45 +611,6 @@
 	}
 }
 
-void usb_wwan_disconnect(struct usb_serial *serial)
-{
-	stop_read_write_urbs(serial);
-}
-EXPORT_SYMBOL(usb_wwan_disconnect);
-
-void usb_wwan_release(struct usb_serial *serial)
-{
-	int i, j;
-	struct usb_serial_port *port;
-	struct usb_wwan_port_private *portdata;
-
-	/* Now free them */
-	for (i = 0; i < serial->num_ports; ++i) {
-		port = serial->port[i];
-		portdata = usb_get_serial_port_data(port);
-
-		for (j = 0; j < N_IN_URB; j++) {
-			usb_free_urb(portdata->in_urbs[j]);
-			free_page((unsigned long)
-				  portdata->in_buffer[j]);
-			portdata->in_urbs[j] = NULL;
-		}
-		for (j = 0; j < N_OUT_URB; j++) {
-			usb_free_urb(portdata->out_urbs[j]);
-			kfree(portdata->out_buffer[j]);
-			portdata->out_urbs[j] = NULL;
-		}
-	}
-
-	/* Now free per port private data */
-	for (i = 0; i < serial->num_ports; i++) {
-		port = serial->port[i];
-		kfree(usb_get_serial_port_data(port));
-	}
-}
-EXPORT_SYMBOL(usb_wwan_release);
-
-#ifdef CONFIG_PM
 int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
 {
 	struct usb_wwan_intf_private *intfdata = serial->private;
@@ -712,7 +702,7 @@
 
 		/* skip closed ports */
 		spin_lock_irq(&intfdata->susp_lock);
-		if (!portdata->opened) {
+		if (!portdata || !portdata->opened) {
 			spin_unlock_irq(&intfdata->susp_lock);
 			continue;
 		}
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index e4e2fd1..202bba6 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -9,3 +9,6 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called vhost_net.
 
+if STAGING
+source "drivers/vhost/Kconfig.tcm"
+endif
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
new file mode 100644
index 0000000..a9c6f76
--- /dev/null
+++ b/drivers/vhost/Kconfig.tcm
@@ -0,0 +1,6 @@
+config TCM_VHOST
+	tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
+	depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
+	default n
+	---help---
+	Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests.
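
As a usage sketch, the dependencies above translate into a .config fragment along these lines (illustrative only; the exact option set depends on the rest of the configuration):

	CONFIG_EXPERIMENTAL=y
	CONFIG_STAGING=y
	CONFIG_EVENTFD=y
	CONFIG_TARGET_CORE=m
	CONFIG_TCM_VHOST=m
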
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 72dd020..a27b053 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_VHOST_NET) += vhost_net.o
 vhost_net-y := vhost.o net.o
+
+obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
new file mode 100644
index 0000000..fb36654
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.c
@@ -0,0 +1,1628 @@
+/*******************************************************************************
+ * Vhost kernel TCM fabric driver for virtio SCSI initiators
+ *
+ * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2012 IBM Corp.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
+#include <linux/virtio_scsi.h>
+
+#include "vhost.c"
+#include "vhost.h"
+#include "tcm_vhost.h"
+
+struct vhost_scsi {
+	atomic_t vhost_ref_cnt;
+	struct tcm_vhost_tpg *vs_tpg;
+	struct vhost_dev dev;
+	struct vhost_virtqueue vqs[3];
+
+	struct vhost_work vs_completion_work; /* cmd completion work item */
+	struct list_head vs_completion_list;  /* cmd completion queue */
+	spinlock_t vs_completion_lock;        /* protects vs_completion_list */
+};
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+
+static struct workqueue_struct *tcm_vhost_workqueue;
+
+/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(tcm_vhost_mutex);
+static LIST_HEAD(tcm_vhost_list);
+
+static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *tcm_vhost_get_fabric_name(void)
+{
+	return "vhost";
+}
+
+static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_fabric_proto_ident(se_tpg);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	return &tport->tport_name[0];
+}
+
+static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	return tpg->tport_tpgt;
+}
+
+static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_vhost_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+			format_code, buf);
+}
+
+static u32 tcm_vhost_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+			format_code);
+}
+
+static char *tcm_vhost_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_FCP:
+		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+			port_nexus_ptr);
+}
+
+static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+	if (!nacl) {
+		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void tcm_vhost_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
+			struct tcm_vhost_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+{
+	return;
+}
+
+static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void tcm_vhost_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+{
+	/* Go ahead and process the write immediately */
+	target_execute_cmd(se_cmd);
+	return 0;
+}
+
+static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
+
+static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+				struct tcm_vhost_cmd, tvc_se_cmd);
+	vhost_scsi_complete_cmd(tv_cmd);
+	return 0;
+}
+
+static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+{
+	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+				struct tcm_vhost_cmd, tvc_se_cmd);
+	vhost_scsi_complete_cmd(tv_cmd);
+	return 0;
+}
+
+static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
+	u32 sense_length)
+{
+	return 0;
+}
+
+static u16 tcm_vhost_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+
+	/* TODO locking against target/backend threads? */
+	transport_generic_free_cmd(se_cmd, 1);
+
+	if (tv_cmd->tvc_sgl_count) {
+		u32 i;
+		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+
+		kfree(tv_cmd->tvc_sgl);
+	}
+
+	kfree(tv_cmd);
+}
+
+/* Dequeue a command from the completion list */
+static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
+	struct vhost_scsi *vs)
+{
+	struct tcm_vhost_cmd *tv_cmd = NULL;
+
+	spin_lock_bh(&vs->vs_completion_lock);
+	if (list_empty(&vs->vs_completion_list)) {
+		spin_unlock_bh(&vs->vs_completion_lock);
+		return NULL;
+	}
+
+	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
+			    tvc_completion_list) {
+		list_del(&tv_cmd->tvc_completion_list);
+		break;
+	}
+	spin_unlock_bh(&vs->vs_completion_lock);
+	return tv_cmd;
+}
+
+/* Fill in status and signal that we are done processing this command
+ *
+ * This is scheduled in the vhost work queue so we are called with the owner
+ * process mm and can access the vring.
+ */
+static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_completion_work);
+	struct tcm_vhost_cmd *tv_cmd;
+
+	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
+		struct virtio_scsi_cmd_resp v_rsp;
+		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+		int ret;
+
+		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+
+		memset(&v_rsp, 0, sizeof(v_rsp));
+		v_rsp.resid = se_cmd->residual_count;
+		/* TODO is status_qualifier field needed? */
+		v_rsp.status = se_cmd->scsi_status;
+		v_rsp.sense_len = se_cmd->scsi_sense_length;
+		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+		       v_rsp.sense_len);
+		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+		if (likely(ret == 0))
+			vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
+		else
+			pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+		vhost_scsi_free_cmd(tv_cmd);
+	}
+
+	vhost_signal(&vs->dev, &vs->vqs[2]);
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+
+	pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
+
+	spin_lock_bh(&vs->vs_completion_lock);
+	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+	spin_unlock_bh(&vs->vs_completion_lock);
+
+	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
+
+static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+	struct tcm_vhost_tpg *tv_tpg,
+	struct virtio_scsi_cmd_req *v_req,
+	u32 exp_data_len,
+	int data_direction)
+{
+	struct tcm_vhost_cmd *tv_cmd;
+	struct tcm_vhost_nexus *tv_nexus;
+	struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
+	struct se_session *se_sess;
+	struct se_cmd *se_cmd;
+	int sam_task_attr;
+
+	tv_nexus = tv_tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+		return ERR_PTR(-EIO);
+	}
+	se_sess = tv_nexus->tvn_se_sess;
+
+	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+	if (!tv_cmd) {
+		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
+	tv_cmd->tvc_tag = v_req->tag;
+
+	se_cmd = &tv_cmd->tvc_se_cmd;
+	/*
+	 * Locate the SAM Task Attr from virtio_scsi_cmd_req
+	 */
+	sam_task_attr = v_req->task_attr;
+	/*
+	 * Initialize struct se_cmd descriptor from TCM infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
+				data_direction, sam_task_attr,
+				&tv_cmd->tvc_sense_buf[0]);
+
+#if 0	/* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
+	if (bidi)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+#endif
+	return tv_cmd;
+}
+
+/*
+ * Map a user memory range into a scatterlist
+ *
+ * Returns the number of scatterlist entries used or -errno on error.
+ */
+static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+	unsigned int sgl_count, void __user *ptr, size_t len, int write)
+{
+	struct scatterlist *sg = sgl;
+	unsigned int npages = 0;
+	int ret;
+
+	while (len > 0) {
+		struct page *page;
+		unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
+		unsigned int nbytes = min_t(unsigned int,
+				PAGE_SIZE - offset, len);
+
+		if (npages == sgl_count) {
+			ret = -ENOBUFS;
+			goto err;
+		}
+
+		ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
+		BUG_ON(ret == 0); /* we should either get our page or fail */
+		if (ret < 0)
+			goto err;
+
+		sg_set_page(sg, page, nbytes, offset);
+		ptr += nbytes;
+		len -= nbytes;
+		sg++;
+		npages++;
+	}
+	return npages;
+
+err:
+	/* Put pages that we hold */
+	for (sg = sgl; sg != &sgl[npages]; sg++)
+		put_page(sg_page(sg));
+	return ret;
+}
+
+static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+	struct iovec *iov, unsigned int niov, int write)
+{
+	int ret;
+	unsigned int i;
+	u32 sgl_count;
+	struct scatterlist *sg;
+
+	/*
+	 * Find out how long sglist needs to be
+	 */
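+	/*
+	 * Each iovec may straddle page boundaries; e.g. 6000 bytes starting
+	 * 512 bytes into a page cover two pages, which is what the
+	 * round-up minus round-down difference below computes.
+	 */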
+	sgl_count = 0;
+	for (i = 0; i < niov; i++) {
+		sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
+				PAGE_SIZE - 1) >> PAGE_SHIFT) -
+				((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
+	}
+	/* TODO overflow checking */
+
+	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+	if (!sg)
+		return -ENOMEM;
+	pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
+	       sg, sgl_count, IS_ERR(sg));
+	sg_init_table(sg, sgl_count);
+
+	tv_cmd->tvc_sgl = sg;
+	tv_cmd->tvc_sgl_count = sgl_count;
+
+	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+	for (i = 0; i < niov; i++) {
+		ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
+					iov[i].iov_len, write);
+		if (ret < 0) {
+			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+			kfree(tv_cmd->tvc_sgl);
+			tv_cmd->tvc_sgl = NULL;
+			tv_cmd->tvc_sgl_count = 0;
+			return ret;
+		}
+
+		sg += ret;
+		sgl_count -= ret;
+	}
+	return 0;
+}
+
+static void tcm_vhost_submission_work(struct work_struct *work)
+{
+	struct tcm_vhost_cmd *tv_cmd =
+		container_of(work, struct tcm_vhost_cmd, work);
+	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
+	int rc, sg_no_bidi = 0;
+	/*
+	 * Locate the struct se_lun pointer based on v_req->lun, and
+	 * attach it to struct se_cmd
+	 */
+	rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
+	if (rc < 0) {
+		pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
+		transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
+			tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
+	if (rc == -ENOMEM) {
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	} else if (rc < 0) {
+		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+			tcm_vhost_queue_status(se_cmd);
+		else
+			transport_send_check_condition_and_sense(se_cmd,
+					se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+
+	if (tv_cmd->tvc_sgl_count) {
+		sg_ptr = tv_cmd->tvc_sgl;
+		/*
+		 * For BIDI commands, pass in the extra READ buffer
+		 * to transport_generic_map_mem_to_cmd() below..
+		 */
+/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
+#if 0
+		if (se_cmd->se_cmd_flags & SCF_BIDI) {
+			sg_bidi_ptr = NULL;
+			sg_no_bidi = 0;
+		}
+#endif
+	} else {
+		sg_ptr = NULL;
+	}
+
+	rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr,
+				tv_cmd->tvc_sgl_count, sg_bidi_ptr,
+				sg_no_bidi);
+	if (rc < 0) {
+		transport_send_check_condition_and_sense(se_cmd,
+				se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+	transport_handle_cdb_direct(se_cmd);
+}
+
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[2];
+	struct virtio_scsi_cmd_req v_req;
+	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_cmd *tv_cmd;
+	u32 exp_data_len, data_first, data_num, data_direction;
+	unsigned out, in, i;
+	int head, ret;
+
+	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+	tv_tpg = vs->vs_tpg;
+	if (unlikely(!tv_tpg)) {
+		pr_err("%s endpoint not set\n", __func__);
+		return;
+	}
+
+	mutex_lock(&vq->mutex);
+	vhost_disable_notify(&vs->dev, vq);
+
+	for (;;) {
+		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+					ARRAY_SIZE(vq->iov), &out, &in,
+					NULL, NULL);
+		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+					head, out, in);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
+		/* Nothing new?  Wait for eventfd to tell us they refilled. */
+		if (head == vq->num) {
+			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+				vhost_disable_notify(&vs->dev, vq);
+				continue;
+			}
+			break;
+		}
+
+/* FIXME: BIDI operation */
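+		/*
+		 * Each request carries one out descriptor holding the
+		 * virtio_scsi_cmd_req header and one in descriptor for the
+		 * response; any additional in descriptors are read payload,
+		 * any additional out descriptors are write payload.
+		 */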
+		if (out == 1 && in == 1) {
+			data_direction = DMA_NONE;
+			data_first = 0;
+			data_num = 0;
+		} else if (out == 1 && in > 1) {
+			data_direction = DMA_FROM_DEVICE;
+			data_first = out + 1;
+			data_num = in - 1;
+		} else if (out > 1 && in == 1) {
+			data_direction = DMA_TO_DEVICE;
+			data_first = 1;
+			data_num = out - 1;
+		} else {
+			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
+					out, in);
+			break;
+		}
+
+		/*
+		 * Check for a sane resp buffer so we can report errors to
+		 * the guest.
+		 */
+		if (unlikely(vq->iov[out].iov_len !=
+					sizeof(struct virtio_scsi_cmd_resp))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+				" bytes\n", vq->iov[out].iov_len);
+			break;
+		}
+
+		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
+				" bytes\n", vq->iov[0].iov_len);
+			break;
+		}
+		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
+			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
+		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
+				sizeof(v_req));
+		if (unlikely(ret)) {
+			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
+			break;
+		}
+
+		exp_data_len = 0;
+		for (i = 0; i < data_num; i++)
+			exp_data_len += vq->iov[data_first + i].iov_len;
+
+		tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+					exp_data_len, data_direction);
+		if (IS_ERR(tv_cmd)) {
+			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
+					PTR_ERR(tv_cmd));
+			break;
+		}
+		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
+			": %d\n", tv_cmd, exp_data_len, data_direction);
+
+		tv_cmd->tvc_vhost = vs;
+
+		if (unlikely(vq->iov[out].iov_len !=
+				sizeof(struct virtio_scsi_cmd_resp))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+				" bytes, out: %d, in: %d\n",
+				vq->iov[out].iov_len, out, in);
+			break;
+		}
+
+		tv_cmd->tvc_resp = vq->iov[out].iov_base;
+
+		/*
+		 * Copy the received CDB into tv_cmd->tvc_cdb, which is used
+		 * by tcm_vhost_submission_work() and passed down into
+		 * target_setup_cmd_from_cdb()
+		 */
+		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+		/*
+		 * Check that the received CDB size does not exceed our
+		 * hardcoded max for tcm_vhost
+		 */
+		/* TODO what if cdb was too small for varlen cdb header? */
+		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+					TCM_VHOST_MAX_CDB_SIZE)) {
+			vq_err(vq, "Received SCSI CDB with command_size: %d that"
+				" exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
+				scsi_command_size(tv_cmd->tvc_cdb),
+				TCM_VHOST_MAX_CDB_SIZE);
+			break; /* TODO */
+		}
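+		/*
+		 * virtio-scsi encodes the LUN as (1, target, 0x40 | lun_hi,
+		 * lun_lo, 0, ...); mask off the 0x4000 flat-addressing bits
+		 * to recover the SAM LUN number.
+		 */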
+		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+
+		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+			tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+
+		if (data_direction != DMA_NONE) {
+			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+					&vq->iov[data_first], data_num,
+					data_direction == DMA_TO_DEVICE);
+			if (unlikely(ret)) {
+				vq_err(vq, "Failed to map iov to sgl\n");
+				break; /* TODO */
+			}
+		}
+
+		/*
+		 * Save the descriptor from vhost_get_vq_desc() to be used to
+		 * complete the virtio-scsi request in TCM callback context via
+		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+		 */
+		tv_cmd->tvc_vq_desc = head;
+		/*
+		 * Dispatch tv_cmd descriptor for cmwq execution in process
+		 * context provided by tcm_vhost_workqueue.  This also ensures
+		 * tv_cmd is executed on the same kworker CPU as this vhost
+		 * thread to gain positive L2 cache locality effects..
+		 */
+		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
+		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+	}
+
+	mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
+{
+	pr_err("%s: The handling func for control queue.\n", __func__);
+}
+
+static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+{
+	pr_err("%s: The handling func for event queue.\n", __func__);
+}
+
+static void vhost_scsi_handle_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	vhost_scsi_handle_vq(vs);
+}
+
+/*
+ * Called from vhost_scsi_ioctl() context to walk the list of available
+ * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ */
+static int vhost_scsi_set_endpoint(
+	struct vhost_scsi *vs,
+	struct vhost_scsi_target *t)
+{
+	struct tcm_vhost_tport *tv_tport;
+	struct tcm_vhost_tpg *tv_tpg;
+	int index;
+
+	mutex_lock(&vs->dev.mutex);
+	/* Verify that the rings have been set up correctly. */
+	for (index = 0; index < vs->dev.nvqs; ++index) {
+		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+			mutex_unlock(&vs->dev.mutex);
+			return -EFAULT;
+		}
+	}
+
+	if (vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EEXIST;
+	}
+	mutex_unlock(&vs->dev.mutex);
+
+	mutex_lock(&tcm_vhost_mutex);
+	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
+		mutex_lock(&tv_tpg->tv_tpg_mutex);
+		if (!tv_tpg->tpg_nexus) {
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			continue;
+		}
+		if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			continue;
+		}
+		tv_tport = tv_tpg->tport;
+
+		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
+		    (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
+			atomic_inc(&tv_tpg->tv_tpg_vhost_count);
+			smp_mb__after_atomic_inc();
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			mutex_unlock(&tcm_vhost_mutex);
+
+			mutex_lock(&vs->dev.mutex);
+			vs->vs_tpg = tv_tpg;
+			atomic_inc(&vs->vhost_ref_cnt);
+			smp_mb__after_atomic_inc();
+			mutex_unlock(&vs->dev.mutex);
+			return 0;
+		}
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+	}
+	mutex_unlock(&tcm_vhost_mutex);
+	return -EINVAL;
+}
+
+static int vhost_scsi_clear_endpoint(
+	struct vhost_scsi *vs,
+	struct vhost_scsi_target *t)
+{
+	struct tcm_vhost_tport *tv_tport;
+	struct tcm_vhost_tpg *tv_tpg;
+	int index;
+
+	mutex_lock(&vs->dev.mutex);
+	/* Verify that the rings have been set up correctly. */
+	for (index = 0; index < vs->dev.nvqs; ++index) {
+		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+			mutex_unlock(&vs->dev.mutex);
+			return -EFAULT;
+		}
+	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENODEV;
+	}
+	tv_tpg = vs->vs_tpg;
+	tv_tport = tv_tpg->tport;
+
+	if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
+	    (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
+		mutex_unlock(&vs->dev.mutex);
+		pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+			" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+			tv_tport->tport_name, tv_tpg->tport_tpgt,
+			t->vhost_wwpn, t->vhost_tpgt);
+		return -EINVAL;
+	}
+	atomic_dec(&tv_tpg->tv_tpg_vhost_count);
+	vs->vs_tpg = NULL;
+	mutex_unlock(&vs->dev.mutex);
+
+	return 0;
+}
+
+static int vhost_scsi_open(struct inode *inode, struct file *f)
+{
+	struct vhost_scsi *s;
+	int r;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	INIT_LIST_HEAD(&s->vs_completion_list);
+	spin_lock_init(&s->vs_completion_lock);
+
+	s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
+	s->vqs[2].handle_kick = vhost_scsi_handle_kick;
+	r = vhost_dev_init(&s->dev, s->vqs, 3);
+	if (r < 0) {
+		kfree(s);
+		return r;
+	}
+
+	f->private_data = s;
+	return 0;
+}
+
+static int vhost_scsi_release(struct inode *inode, struct file *f)
+{
+	struct vhost_scsi *s = f->private_data;
+
+	if (s->vs_tpg && s->vs_tpg->tport) {
+		struct vhost_scsi_target backend;
+
+		memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
+				sizeof(backend.vhost_wwpn));
+		backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
+		vhost_scsi_clear_endpoint(s, &backend);
+	}
+
+	vhost_dev_cleanup(&s->dev, false);
+	kfree(s);
+	return 0;
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	/* TODO possibly smp_wmb() and flush vqs */
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
+static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
+				unsigned long arg)
+{
+	struct vhost_scsi *vs = f->private_data;
+	struct vhost_scsi_target backend;
+	void __user *argp = (void __user *)arg;
+	u64 __user *featurep = argp;
+	u64 features;
+	int r;
+
+	switch (ioctl) {
+	case VHOST_SCSI_SET_ENDPOINT:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		return vhost_scsi_set_endpoint(vs, &backend);
+	case VHOST_SCSI_CLEAR_ENDPOINT:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		return vhost_scsi_clear_endpoint(vs, &backend);
+	case VHOST_SCSI_GET_ABI_VERSION:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		backend.abi_version = VHOST_SCSI_ABI_VERSION;
+
+		if (copy_to_user(argp, &backend, sizeof backend))
+			return -EFAULT;
+		return 0;
+	case VHOST_GET_FEATURES:
+		features = VHOST_FEATURES;
+		if (copy_to_user(featurep, &features, sizeof features))
+			return -EFAULT;
+		return 0;
+	case VHOST_SET_FEATURES:
+		if (copy_from_user(&features, featurep, sizeof features))
+			return -EFAULT;
+		return vhost_scsi_set_features(vs, features);
+	default:
+		mutex_lock(&vs->dev.mutex);
+		r = vhost_dev_ioctl(&vs->dev, ioctl, arg);
+		mutex_unlock(&vs->dev.mutex);
+		return r;
+	}
+}
+
+static const struct file_operations vhost_scsi_fops = {
+	.owner          = THIS_MODULE,
+	.release        = vhost_scsi_release,
+	.unlocked_ioctl = vhost_scsi_ioctl,
+	/* TODO compat ioctl? */
+	.open           = vhost_scsi_open,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice vhost_scsi_misc = {
+	MISC_DYNAMIC_MINOR,
+	"vhost-scsi",
+	&vhost_scsi_fops,
+};
+
+static int __init vhost_scsi_register(void)
+{
+	return misc_register(&vhost_scsi_misc);
+}
+
+static int vhost_scsi_deregister(void)
+{
+	return misc_deregister(&vhost_scsi_misc);
+}
+
+static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+{
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return "SAS";
+	case SCSI_PROTOCOL_FCP:
+		return "FCP";
+	case SCSI_PROTOCOL_ISCSI:
+		return "iSCSI";
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+static int tcm_vhost_port_link(
+	struct se_portal_group *se_tpg,
+	struct se_lun *lun)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	atomic_inc(&tv_tpg->tv_tpg_port_count);
+	smp_mb__after_atomic_inc();
+
+	return 0;
+}
+
+static void tcm_vhost_port_unlink(
+	struct se_portal_group *se_tpg,
+	struct se_lun *se_lun)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	atomic_dec(&tv_tpg->tv_tpg_port_count);
+	smp_mb__after_atomic_dec();
+}
+
+static struct se_node_acl *tcm_vhost_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct tcm_vhost_nacl *nacl;
+	u64 wwpn = 0;
+	u32 nexus_depth;
+
+	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL); */
+	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+	if (!se_nacl_new)
+		return ERR_PTR(-ENOMEM);
+
+	nexus_depth = 1;
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+	nacl->iport_wwpn = wwpn;
+
+	return se_nacl;
+}
+
+static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct tcm_vhost_nacl *nacl = container_of(se_acl,
+				struct tcm_vhost_nacl, se_node_acl);
+	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
+static int tcm_vhost_make_nexus(
+	struct tcm_vhost_tpg *tv_tpg,
+	const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_vhost_nexus *tv_nexus;
+
+	mutex_lock(&tv_tpg->tv_tpg_mutex);
+	if (tv_tpg->tpg_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_debug("tv_tpg->tpg_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tv_tpg->se_tpg;
+
+	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 *  Initialize the struct se_session pointer
+	 */
+	tv_nexus->tvn_se_sess = transport_init_session();
+	if (IS_ERR(tv_nexus->tvn_se_sess)) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		kfree(tv_nexus);
+		return -ENOMEM;
+	}
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_debug("core_tpg_check_initiator_node_acl() failed"
+				" for %s\n", name);
+		transport_free_session(tv_nexus->tvn_se_sess);
+		kfree(tv_nexus);
+		return -ENOMEM;
+	}
+	/*
+	 * Now register the TCM vHost virtual I_T Nexus as active with the
+	 * call to __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+			tv_nexus->tvn_se_sess, tv_nexus);
+	tv_tpg->tpg_nexus = tv_nexus;
+
+	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+	return 0;
+}
+
+static int tcm_vhost_drop_nexus(
+	struct tcm_vhost_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_vhost_nexus *tv_nexus;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
+	se_sess = tv_nexus->tvn_se_sess;
+	if (!se_sess) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
+	if (atomic_read(&tpg->tv_tpg_port_count)) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove TCM_vHost I_T Nexus with"
+			" active TPG port count: %d\n",
+			atomic_read(&tpg->tv_tpg_port_count));
+		return -EPERM;
+	}
+
+	if (atomic_read(&tpg->tv_tpg_vhost_count)) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove TCM_vHost I_T Nexus with"
+			" active TPG vhost count: %d\n",
+			atomic_read(&tpg->tv_tpg_vhost_count));
+		return -EPERM;
+	}
+
+	pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated vHost Target Port
+	 */
+	transport_deregister_session(tv_nexus->tvn_se_sess);
+	tpg->tpg_nexus = NULL;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	kfree(tv_nexus);
+	return 0;
+}
+
+static ssize_t tcm_vhost_tpg_show_nexus(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_nexus *tv_nexus;
+	ssize_t ret;
+
+	mutex_lock(&tv_tpg->tv_tpg_mutex);
+	tv_nexus = tv_tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	return ret;
+}
+
+static ssize_t tcm_vhost_tpg_store_nexus(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
+	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shut down the active I_T nexus if 'NULL' is passed.
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_vhost_drop_nexus(tv_tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
+	 * tcm_vhost_make_nexus().
+	 */
+	if (strlen(page) >= TCM_VHOST_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds"
+				" max: %d\n", page, TCM_VHOST_NAMELEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+			pr_err("Passed SAS Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+			pr_err("Passed FCP Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+			pr_err("Passed iSCSI Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
+			" %s\n", i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port)-1] == '\n')
+		i_port[strlen(i_port)-1] = '\0';
+
+	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
+	&tcm_vhost_tpg_nexus.attr,
+	NULL,
+};
+
+static struct se_portal_group *tcm_vhost_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_vhost_tport *tport = container_of(wwn,
+			struct tcm_vhost_tport, tport_wwn);
+
+	struct tcm_vhost_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&tpg->tv_tpg_mutex);
+	INIT_LIST_HEAD(&tpg->tv_tpg_list);
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+
+	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	mutex_lock(&tcm_vhost_mutex);
+	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
+	mutex_unlock(&tcm_vhost_mutex);
+
+	return &tpg->se_tpg;
+}
+
+static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	mutex_lock(&tcm_vhost_mutex);
+	list_del(&tpg->tv_tpg_list);
+	mutex_unlock(&tcm_vhost_mutex);
+	/*
+	 * Release the virtual I_T Nexus for this vHost TPG
+	 */
+	tcm_vhost_drop_nexus(tpg);
+	/*
+	 * Deregister the se_tpg from TCM..
+	 */
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+static struct se_wwn *tcm_vhost_make_tport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_vhost_tport *tport;
+	char *ptr;
+	u64 wwpn = 0;
+	int off = 0;
+
+	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL); */
+
+	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+	if (!tport) {
+		pr_err("Unable to allocate struct tcm_vhost_tport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tport->tport_wwpn = wwpn;
+	/*
+	 * Determine the emulated Protocol Identifier and Target Port Name
+	 * based on the incoming configfs directory name.
+	 */
+	ptr = strstr(name, "naa.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+		goto check_len;
+	}
+	ptr = strstr(name, "fc.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+		off = 3; /* Skip over "fc." */
+		goto check_len;
+	}
+	ptr = strstr(name, "iqn.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+		goto check_len;
+	}
+
+	pr_err("Unable to locate prefix for emulated Target Port:"
+			" %s\n", name);
+	kfree(tport);
+	return ERR_PTR(-EINVAL);
+
+check_len:
+	if (strlen(name) >= TCM_VHOST_NAMELEN) {
+		pr_err("Emulated %s Address: %s, exceeds"
+			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
+			TCM_VHOST_NAMELEN);
+		kfree(tport);
+		return ERR_PTR(-EINVAL);
+	}
+	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+
+	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
+		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+
+	return &tport->tport_wwn;
+}
+
+static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+{
+	struct tcm_vhost_tport *tport = container_of(wwn,
+				struct tcm_vhost_tport, tport_wwn);
+
+	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
+		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+		tport->tport_name);
+
+	kfree(tport);
+}
+
+static ssize_t tcm_vhost_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+		utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_vhost, version);
+
+static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
+	&tcm_vhost_wwn_version.attr,
+	NULL,
+};
+
+static struct target_core_fabric_ops tcm_vhost_ops = {
+	.get_fabric_name		= tcm_vhost_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
+	.tpg_get_tag			= tcm_vhost_get_tag,
+	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_vhost_check_true,
+	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
+	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
+	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
+	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
+	.release_cmd			= tcm_vhost_release_cmd,
+	.shutdown_session		= tcm_vhost_shutdown_session,
+	.close_session			= tcm_vhost_close_session,
+	.sess_get_index			= tcm_vhost_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_vhost_write_pending,
+	.write_pending_status		= tcm_vhost_write_pending_status,
+	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
+	.get_task_tag			= tcm_vhost_get_task_tag,
+	.get_cmd_state			= tcm_vhost_get_cmd_state,
+	.queue_data_in			= tcm_vhost_queue_data_in,
+	.queue_status			= tcm_vhost_queue_status,
+	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_vhost_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_vhost_set_fabric_sense_len,
+	/*
+	 * Setup callers for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_vhost_make_tport,
+	.fabric_drop_wwn		= tcm_vhost_drop_tport,
+	.fabric_make_tpg		= tcm_vhost_make_tpg,
+	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
+	.fabric_post_link		= tcm_vhost_port_link,
+	.fabric_pre_unlink		= tcm_vhost_port_unlink,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
+};
+
+static int tcm_vhost_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	pr_debug("TCM_VHOST fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+		utsname()->machine);
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_vhost_ops
+	 */
+	fabric->tf_ops = tcm_vhost_ops;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed"
+				" for TCM_VHOST\n");
+		return ret;
+	}
+	/*
+	 * Setup our local pointer to *fabric
+	 */
+	tcm_vhost_fabric_configfs = fabric;
+	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+	return 0;
+}
+
+static void tcm_vhost_deregister_configfs(void)
+{
+	if (!tcm_vhost_fabric_configfs)
+		return;
+
+	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
+	tcm_vhost_fabric_configfs = NULL;
+	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+}
+
+static int __init tcm_vhost_init(void)
+{
+	int ret = -ENOMEM;
+
+	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
+	if (!tcm_vhost_workqueue)
+		goto out;
+
+	ret = vhost_scsi_register();
+	if (ret < 0)
+		goto out_destroy_workqueue;
+
+	ret = tcm_vhost_register_configfs();
+	if (ret < 0)
+		goto out_vhost_scsi_deregister;
+
+	return 0;
+
+out_vhost_scsi_deregister:
+	vhost_scsi_deregister();
+out_destroy_workqueue:
+	destroy_workqueue(tcm_vhost_workqueue);
+out:
+	return ret;
+}
+
+static void tcm_vhost_exit(void)
+{
+	tcm_vhost_deregister_configfs();
+	vhost_scsi_deregister();
+	destroy_workqueue(tcm_vhost_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_vhost_init);
+module_exit(tcm_vhost_exit);
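The configfs callbacks above (tcm_vhost_make_tport(), tcm_vhost_make_tpg() and tcm_vhost_tpg_store_nexus()) are driven entirely from userspace. Below is a minimal sketch of that flow, assuming configfs is mounted at /sys/kernel/config, that backstores and LUN links are provisioned separately through target_core, and that the naa. WWPN shown is only a placeholder:

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* mkdir of the WWN group invokes tcm_vhost_make_tport() */
	if (mkdir("/sys/kernel/config/target/vhost/naa.600140554cf3a18e",
		  0755) && errno != EEXIST)
		return 1;
	/* mkdir of the tpgt_N group invokes tcm_vhost_make_tpg() */
	if (mkdir("/sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1",
		  0755) && errno != EEXIST)
		return 1;
	/* writing the nexus attribute invokes tcm_vhost_tpg_store_nexus();
	 * writing "NULL" later tears it down via tcm_vhost_drop_nexus() */
	return write_attr("/sys/kernel/config/target/vhost/"
			  "naa.600140554cf3a18e/tpgt_1/nexus",
			  "naa.600140554cf3a18e") ? 1 : 0;
}

The same WWPN/TPGT pair is what a vhost-scsi user then hands to VHOST_SCSI_SET_ENDPOINT; see the sketch after the tcm_vhost.h hunk below.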
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
new file mode 100644
index 0000000..c983ed2
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.h
@@ -0,0 +1,101 @@
+#define TCM_VHOST_VERSION  "v0.1"
+#define TCM_VHOST_NAMELEN 256
+#define TCM_VHOST_MAX_CDB_SIZE 32
+
+struct tcm_vhost_cmd {
+	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+	int tvc_vq_desc;
+	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+	u64 tvc_tag;
+	/* The number of scatterlists associated with this cmd */
+	u32 tvc_sgl_count;
+	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+	u32 tvc_lun;
+	/* Pointer to the SGL formatted memory from virtio-scsi */
+	struct scatterlist *tvc_sgl;
+	/* Pointer to response */
+	struct virtio_scsi_cmd_resp __user *tvc_resp;
+	/* Pointer to vhost_scsi for our device */
+	struct vhost_scsi *tvc_vhost;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd tvc_se_cmd;
+	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+	struct work_struct work;
+	/* Copy of the incoming SCSI command descriptor block (CDB) */
+	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+	/* Completed commands list, serviced from vhost worker thread */
+	struct list_head tvc_completion_list;
+};
+
+struct tcm_vhost_nexus {
+	/* Pointer to TCM session for I_T Nexus */
+	struct se_session *tvn_se_sess;
+};
+
+struct tcm_vhost_nacl {
+	/* Binary World Wide unique Port Name for Vhost Initiator port */
+	u64 iport_wwpn;
+	/* ASCII formatted WWPN for Sas Initiator port */
+	char iport_name[TCM_VHOST_NAMELEN];
+	/* Returned by tcm_vhost_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_vhost_tpg {
+	/* Vhost port target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
+	atomic_t tv_tpg_port_count;
+	/* Used for vhost_scsi device reference to tpg_nexus */
+	atomic_t tv_tpg_vhost_count;
+	/* list for tcm_vhost_list */
+	struct list_head tv_tpg_list;
+	/* Used to protect access for tpg_nexus */
+	struct mutex tv_tpg_mutex;
+	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+	struct tcm_vhost_nexus *tpg_nexus;
+	/* Pointer back to tcm_vhost_tport */
+	struct tcm_vhost_tport *tport;
+	/* Returned by tcm_vhost_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
+struct tcm_vhost_tport {
+	/* SCSI protocol the tport is providing */
+	u8 tport_proto_id;
+	/* Binary World Wide unique Port Name for Vhost Target port */
+	u64 tport_wwpn;
+	/* ASCII formatted WWPN for Vhost Target port */
+	char tport_name[TCM_VHOST_NAMELEN];
+	/* Returned by tcm_vhost_make_tport() */
+	struct se_wwn tport_wwn;
+};
+
+/*
+ * As per request from MST, keep TCM_VHOST related ioctl defines out of
+ * linux/vhost.h (user-space) for now..
+ */
+
+#include <linux/vhost.h>
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ *            RFC-v2 vhost-scsi userspace.  Add GET_ABI_VERSION ioctl usage
+ */
+
+#define VHOST_SCSI_ABI_VERSION	0
+
+struct vhost_scsi_target {
+	int abi_version;
+	unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
+	unsigned short vhost_tpgt;
+};
+
+/* VHOST_SCSI specific defines */
+#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
+#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
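A minimal userspace sketch of the ioctl flow above, assuming the VHOST_SCSI_* numbers and struct vhost_scsi_target are mirrored locally (they are deliberately kept out of linux/vhost.h for now, so userspace such as QEMU carries its own copy), and that the WWPN/TPGT name a TPG already configured via configfs. A real user also sets up the vrings with the generic VHOST_SET_MEM_TABLE/VHOST_SET_VRING_* ioctls before the guest drives I/O:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

struct vhost_scsi_target {
	int abi_version;
	unsigned char vhost_wwpn[224];		/* TRANSPORT_IQN_LEN */
	unsigned short vhost_tpgt;
};

#define VHOST_SCSI_SET_ENDPOINT    _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)

int main(void)
{
	struct vhost_scsi_target backend;
	int fd = open("/dev/vhost-scsi", O_RDWR);

	if (fd < 0)
		return 1;

	/* standard vhost ioctl: claim the device for this process */
	if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0)
		goto err;

	memset(&backend, 0, sizeof(backend));
	if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &backend) < 0)
		goto err;
	printf("vhost-scsi ABI version %d\n", backend.abi_version);

	/* WWPN/TPGT must match a tport/tpg configured via configfs */
	strncpy((char *)backend.vhost_wwpn, "naa.600140554cf3a18e",
		sizeof(backend.vhost_wwpn) - 1);
	backend.vhost_tpgt = 1;
	if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return 1;
}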
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2e471c2..f8a79fc 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -372,8 +372,12 @@
 	struct vc_data *vc = NULL;
 	int c;
 	int mode;
+	int ret;
 
-	console_lock();
+	ret = console_trylock();
+	if (ret == 0)
+		return;
+
 	if (ops && ops->currcon != -1)
 		vc = vc_cons[ops->currcon].d;
 
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index d90062b..92d08e7 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -91,6 +91,11 @@
 	.fops = &w1_therm_fops,
 };
 
+static struct w1_family w1_therm_family_DS1825 = {
+	.fid = W1_THERM_DS1825,
+	.fops = &w1_therm_fops,
+};
+
 struct w1_therm_family_converter
 {
 	u8			broken;
@@ -120,6 +125,10 @@
 		.f		= &w1_therm_family_DS28EA00,
 		.convert	= w1_DS18B20_convert_temp
 	},
+	{
+		.f		= &w1_therm_family_DS1825,
+		.convert	= w1_DS18B20_convert_temp
+	}
 };
 
 static inline int w1_DS18B20_convert_temp(u8 rom[9])
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index b00ada4..a1f0ce1 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -39,6 +39,7 @@
 #define W1_EEPROM_DS2431	0x2D
 #define W1_FAMILY_DS2760	0x30
 #define W1_FAMILY_DS2780	0x32
+#define W1_THERM_DS1825		0x3B
 #define W1_FAMILY_DS2781	0x3D
 #define W1_THERM_DS28EA00	0x42
 
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa81..858c971 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@
      */
 
 struct zorro_bus {
-	struct list_head devices;	/* list of devices on this bus */
 	struct device dev;
 };
 
@@ -136,7 +135,6 @@
 	if (!bus)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&bus->devices);
 	bus->dev.parent = &pdev->dev;
 	dev_set_name(&bus->dev, "zorro");
 	error = device_register(&bus->dev);
diff --git a/firmware/Makefile b/firmware/Makefile
index 344713b..76628e3 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -43,7 +43,6 @@
 fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
 fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
 				   cxgb3/t3c_psram-1.1.0.bin \
-				   cxgb3/t3fw-7.10.0.bin \
 				   cxgb3/ael2005_opt_edc.bin \
 				   cxgb3/ael2005_twx_edc.bin \
 				   cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
deleted file mode 100644
index 96399d8..0000000
--- a/firmware/cxgb3/t3fw-7.10.0.bin.ihex
+++ /dev/null
@@ -1,1935 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006C841FFFC2A020006CCCB6
-:100060001FFFC2A420006D0C1FFFC2A820006D80DE
-:100070001FFFC2AC200003C0C00000E43100EA3121
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC294E30005E072
-:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
-:1004A0001FFFC59CE300085C200000002000016ADB
-:1004B000E3000B582000018020000180E3000CC401
-:1004C0002000020020000203E3000CC42000021CF4
-:1004D00020000220E3000CC8200002202000022699
-:1004E000E3000CCC2000023C20000240E3000CD4CE
-:1004F0002000024020000249E3000CD82000024CFA
-:1005000020000250E3000CE42000025020000259B9
-:10051000E3000CE82000025C20000260E3000CF421
-:100520002000026020000269E3000CF82000026C49
-:1005300020000270E3000D04200002702000027908
-:10054000E3000D082000028C2000028CE3000D1453
-:100550002000029020000293E3000D14200002AC62
-:10056000200002B0E3000D18200002D0200002F2AB
-:10057000E3000D1C200003B0200003B0E3000D4099
-:10058000200003B0200003B0E3000D40200003B0C2
-:10059000200003B0E3000D40200003B0200003B0B2
-:1005A000E3000D40200003B020006EA4E3000D40E6
-:1005B00020006EA420006EA4E30078340000000048
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
-:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC27D000FFFFF804FFFFF8000000023
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:10065000800000190400000000000800E100020012
-:1006600010000005806000007000000020000009FC
-:10067000001FF8008000001EA0000000F80000002D
-:1006800007FFFFFF080000001800000001008001C4
-:10069000420000001FFFC22D1FFFC0EC00010080C0
-:1006A000604000001A0000000C0000001000000A6A
-:1006B000000030000001000080000018FC00000075
-:1006C0008000000100004000600008008000001C65
-:1006D0008000001A030000008000040004030403EB
-:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
-:1006F000FFFFF000000016D00000FFF7A50000008B
-:100700001FFFC4C01FFFC4710001000800000B20C0
-:10071000202FFF801FFFC46500002C00FFFEFFF8A4
-:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
-:100730000000FFEF010011001FFFC3E21FFFC5A073
-:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
-:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
-:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
-:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
-:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
-:10079000000100817FFFFFFFE1000600000027103D
-:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
-:1007B0000003D0901FFFC5742B5063802B507980AD
-:1007C0002B5090802B50A6801FFFC4790100110F81
-:1007D000202FFE0020300080202FFF000000FFFFB0
-:1007E0000001FFF82B50B2002B50B208000100109E
-:1007F0002B50B1802B50B2802B50BA000001001159
-:100800002B50BD282B50BC802B50BDA020300000A9
-:10081000DFFFFE005000000200C0000002000000E8
-:10082000FFFFF7F41FFFC07C000FF800044000003A
-:10083000001000000C4000001C400000E00000A080
-:100840001FFFC5501FFD00081FFFC5641FFFC578AF
-:100850001FFFC58CE1000690E10006EC00000000DF
-:100860000000000000000000000000000100000087
-:100870000000000000000000000000002010004008
-:10088000201000402010004020140080200C0000A8
-:10089000200C0000200C00002010004020140080DC
-:1008A0002014008020140080201800C0201C0100AB
-:1008B000201C0100201C010020200140201800C045
-:1008C000201800C0201800C0201C0100201800C003
-:1008D000201800C0201800C0201C0100202001406A
-:1008E00020200140202001402020094020200940F4
-:1008F000202009402020094020240980FFFFFFFF1D
-:10090000FFFFFFFFFFFFFFFF0000000000000000EF
-:1009100000000000000000000000000020005588DA
-:1009200020005458200055882000558820005394FA
-:100930002000539420005394200051D4200051D41F
-:10094000200051CC2000513820004FE020004DC045
-:1009500020004B94000000000000000020005558CB
-:1009600020005424200054C8200054C82000527C89
-:100970002000527C2000527C2000527C2000527CBF
-:10098000200051C42000527C20004F0020004D70F8
-:1009900020004B40000000000000000020000BF091
-:1009A00020003ADC200004C02000473020000BE883
-:1009B000200041F4200003F0200046F020004B1CF2
-:1009C00020003F0020003E1C20003A58200038E85C
-:1009D00020003658200031B820003C7820002DD06F
-:1009E0002000286420006828200023F0200020D068
-:1009F0002000207C20001D68200018602000158841
-:100A000020000E5420000C3420001134200013204C
-:100A1000200043EC20003EB420000BF8200004C06E
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A90000000000000000000000000000000000056
-:100AA0003264000000000000326400006400640052
-:100AB00064006400640064006400640000000000DE
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000000000000010000000000000000000D5
-:100B100000000000000000000000000000001000C5
-:100B200000000000000000000000000000000000C5
-:100B300000432380000000000000000000000000CF
-:100B400000000000000000000000000000000000A5
-:100B50000000000000000000005C94015D94025E53
-:100B600094035F94004300000000000000000000B8
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B90000000000000000000005C90015D90025E1B
-:100BA00090035F9000530000000000000000000070
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD0000000000000000000009C94001D90019D9A
-:100BE00094029E94039F94040894050994060A9421
-:100BF000070B94004300000000000000000000000C
-:100C000000000000000000000000000000000000E4
-:100C10000000000000000000009C90019D90029EDA
-:100C200090071D90039F90047890057990067A9024
-:100C3000077B90005300000000000000000000004F
-:100C400000000000000000000000000000000000A4
-:100C5000000000000000000000DC94001D9001DD99
-:100C60009402DE9403DF940404940505940606942C
-:100C70000707940808940909940A0A940B0B940036
-:100C80004300000000000000000000000000000021
-:100C9000000000000000000000DC9001DD9002DE9A
-:100CA000900B1D9003DF9004B49005B59006B690AC
-:100CB00007B79008B89009B9900ABA900BBB90009A
-:100CC0005300000063FFFC0020006C6010FFFF0A6F
-:100CD0000000000020006C8400D23110FFFE0A00EA
-:100CE0000000000020006CCC00D33110FFFE0A0091
-:100CF0000000000020006D0C00D43110FFFE0A003F
-:100D00000000000020006D8000D53110FFFE0A00B9
-:100D10000000000063FFFC00E00000A012FFF7826B
-:100D200020028257C82163FFFC12FFF303E830045E
-:100D3000EE3005C03093209421952263FFFC000023
-:100D40001FFFD000000400201FFFC5A01FFFC6909A
-:100D5000200A0011FFFB13FFFB03E631010200161E
-:100D6000FFFA17FFFAD30F776B069060B4667763CC
-:100D7000F85415F3541AA50F140063FFF90000008E
-:100D80006C1004C020D10F006C1004C0C71AEF060D
-:100D9000D830BC2BD72085720D4211837105450BCD
-:100DA000957202330C2376017B3B04233D0893713B
-:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
-:100DC000088202280A01038E380E0E42C8EE29A6B8
-:100DD0007E6D4A0500208800308C8271D10FC0F0F2
-:100DE000028F387FC0EA63FFE400C0F1C050037E89
-:100DF0000CA2EE0E3D1208820203F538050542CB27
-:100E00005729A67E2FDC100F4F366DFA050020887B
-:100E100000308CBC75C03008E208280A0105833810
-:100E2000030342C93E29A67E0D480CD30F6D8A05E7
-:100E300000208800B08C8271D10FC05008F5387541
-:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
-:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
-:100E6000221DC0D07B81352920060BB702299CFAB0
-:100E7000655008282072288CFF2824726491642A07
-:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
-:100E90003FC1CE7CA13669AC336000370029200603
-:100EA000D7D0299CFACC57282072288CFF2824728E
-:100EB0006491392AD0000CA80C6481680EA90C64D6
-:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
-:100ED0000F2D25028A32C0900A6E5065E5B529248F
-:100EE00067090F4765F5B12C200C1FEEB30CCE112E
-:100EF000AFEE29E286B44879830260058219EEAF2D
-:100F000009C90A2992A36890078F2009FF0C65F58B
-:100F10006E2FE28564F56865559628221D7B810554
-:100F2000D9B060000200C0908B9417EEA50B881416
-:100F300087740B0B47A87718EEA309BB100877023C
-:100F400097F018EEA117EEA208A8010B8802074738
-:100F5000021BEE9E97F10B880298F22790232B90AC
-:100F60002204781006BB1007471208BB0228902104
-:100F70000777100C88100788020B880217EE968BF3
-:100F80003307BB0187340B880298F3979997F48B4A
-:100F90009587399BF588968B3898F688979BF897B4
-:100FA000F998F717EE8D28E28507C7082D74CF084A
-:100FB000480B28E68565550F2B221E28221D7B89AC
-:100FC000022B0A0064BF052CB00728B000DA200607
-:100FD000880A28824CC0D10B8000DBA065AFE76394
-:100FE000FEEA0000292072659E946004E72A2072C0
-:100FF00065AEBF6004DE00002EB0032C2067D4E095
-:1010000065C1058A328C330AFF500C4554BC5564C7
-:10101000F4EB19EE72882A09A90109880C64821F71
-:10102000C0926000DD2ED0032A2067D4E065A0D8EE
-:101030008A328B330AFC500B4554BC5564C4BE192C
-:10104000EE67882A09A9017989D50BEA5064A4E3DF
-:101050000CEE11C0F02F16132E16168AE78CE82A14
-:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
-:101070006583468837DBC0AE89991E789B022BCCEE
-:10108000012B161B29120E2B0A0029161A7FC307E3
-:101090007FC9027EAB01C0B165B49D8B352F0A00BC
-:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
-:1010B00012162B1619005104C0C100CC1A2CCCFFFB
-:1010C0002C16170CFC132C16182B121A2A121BDCC8
-:1010D000505819B6C0D0C0902E5CF42C12172812AC
-:1010E000182F121B2A121A08FF010CAA01883407B4
-:1010F0004C0AAB8B2812192BC6162F86082A860994
-:101100002E74102924672E70038975B1EA2A74039E
-:10111000B09909490C659DB42B20672D250265B354
-:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
-:10113000B00728B000DA2006880A28824CC0D10BFC
-:101140008000DBA065AFE763FD8289BAB199659045
-:101150009788341CEE2398BA8F331EEE1C0F4F5421
-:101160002FB42C8D2A8A320EDD020CAC017DC966AB
-:101170000A49516F92608A3375A65B2CB0130AED51
-:10118000510DCD010D0D410C0C417DC9492EB01200
-:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
-:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
-:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
-:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
-:1011D000EB01C0B164B161C091292467C020D10F77
-:1011E00000008ADAB1AA64A0C02C20672D25026510
-:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
-:1012000065D28A0A4E516FE202600281C0902924A1
-:1012100067090F4765F2F828221D7B89022B0A0017
-:1012200064BCA92CB00728B000DA2006880A2882FE
-:101230004CC0D10B8000DBA065AFE763FC8E0000E3
-:101240000CE9506492ED0CEF11C080281611AFBF6D
-:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
-:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
-:1012700089991C789B022CEC012C161B29120C2C32
-:101280000A0029161A7AE3077AE9027FBB01C0C176
-:1012900065C2A58B352C0A002A0A007AE30564E1B1
-:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
-:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
-:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
-:1012D000920263FF018A330AAB5064BEF92CD0132B
-:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
-:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
-:101300008A362FD2097CA3077AC9027EFB01C0B1BD
-:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
-:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
-:10133000027DEB01C0C165CE9DC090292467C0200D
-:10134000D10F88378C3698140CE90C29161408F83C
-:101350000C981D78FB07281214B088281614891DD4
-:101360009F159B16C0F02B121429161A2B161B8BD7
-:10137000147AE30B7AE90688158E1678EB01C0F132
-:1013800065F1BA29121A2F12118A352E121B9A1AD8
-:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
-:1013A000881AC0F098107AE30A7EA9052A12017AF9
-:1013B0008B01C0F164F08160018389368B37991706
-:1013C0000BE80C981F09C90C29161578EB07281291
-:1013D00015B088281615D9C09A199E188A1F2E1282
-:1013E000152A161A2E161BDAC0C0E08C177F930B35
-:1013F0007FA90688188F1978FB01C0E165E13E29B5
-:10140000121A2F12138A352E121B9A1BAFEE2F12AF
-:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
-:1014200098127AE30A7EA9052A12037A8B01C0F189
-:1014300065F10A2E12162E16192A121B005104C02D
-:10144000E100EE1AB0EE2E16170EFF132F16180F2E
-:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
-:10146000AA2A161B2C161A63FC5E00007FB30263C7
-:10147000FE3163FE2B7EB30263FC3063FC2A000066
-:101480006450C0DA20DBC058168AC020D10FC0914A
-:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
-:1014A0000A80C09A2924682C7007581575D2A0D1DB
-:1014B0000F03470B18ED4DDB70A8287873022B7DC6
-:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
-:1014D00063FAE4000029221D2D25027B9901C0B08A
-:1014E000C9B62CB00728B000DA2006880A28824C3A
-:1014F000C0D10B8000DBA065AFE7C020D10FC09149
-:1015000063FBFF00022A0258024C0AA202060000F6
-:10151000022A025802490AA202060000DB70DA2001
-:10152000C0D12E0A80C09E2924682C7007581554FB
-:10153000C020D10FC09463FBC9C09663FBC4C096A2
-:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
-:10155000C2A02AB4002C200C63FF27008D358CB765
-:101560007DCB0263FDD263FC6D8F358ED77FEB029E
-:1015700063FDC563FC6000006C1004C020D10F0047
-:101580006C1004C020D10F006C10042B221E2822E6
-:101590001DC0A0C0942924062A25027B8901DBA056
-:1015A000C9B913ED04DA2028B0002CB00703880A6B
-:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
-:1015C0000F0000006C10042C20062A210268C805B8
-:1015D00028CCF965812E0A094C6591048F30C1B879
-:1015E0000F8F147FB00528212365812716ECF3297E
-:1015F000629E6F98026000F819ECEF2992266890BD
-:10160000078A2009AA0C65A0E72A629D64A0E12B45
-:10161000200C0CB911A6992D92866FD9026000DBBF
-:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
-:101630000C65E0C7279285C0E06470BF1DECEC68C4
-:10164000434E1CECEB8A2B0CAA029A708920089955
-:10165000110D99029971882A98748F329F752821EB
-:1016600004088811987718ECDC0CBF11A6FF2DF246
-:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
-:1016800074DB40580E81D2A0D10FC020D10F0000D2
-:101690000029CCF96490B12C20668931B1CC0C0CB6
-:1016A000472C24666EC60260008509F85065807F6D
-:1016B0001CECD18A2B0F08400B881008AA020CAA38
-:1016C000029A7089200899110D99029971883398AE
-:1016D000738C329C728A2A9A748934997563FF7D5F
-:1016E00000CC57DA20DB30DC4058155FC020D10F2A
-:1016F00000DA20C0B65815EE63FFE500DA20581571
-:10170000EC63FFDC00DA20DB30DC40DD5058167A79
-:10171000D2A0D10FC858DA20DB305814C72A2102D2
-:1017200065AFBDC09409A90229250263FFB200007C
-:101730002B21045814731DECADC0E02E24668F30AD
-:101740002B200C0F8F1463FF66292138C088798302
-:101750001F8C310CFC5064CF562B2104C0C0581490
-:10176000681DECA2C0E08F302B200C0F8F1463FF9C
-:101770003E2C20662B2104B1CC0C0C472C2466583F
-:1017800014601DEC9AC0E02E24668F302B200C0FC5
-:101790008F1463FF1A0000006C1004C0B7C0A116BC
-:1017A000EC9615EC88D720D840B822C04005350209
-:1017B0009671957002A438040442C94B1AEC7B1947
-:1017C000EC7C29A67EC140D30F6D4A0500808800BD
-:1017D000208C220A88A272D10FC05008A53875B09B
-:1017E000E363FFD76C10069313941129200665520A
-:1017F00088C0716898052A9CF965A29816EC6F2933
-:1018000021028A1309094C6590CD8AA00A6A512ADF
-:10181000ACFD65A0C2CC5FDB30DA208C115815120C
-:10182000C0519A13C7BF9BA98E132EE20968E060CE
-:101830002F629E1DEC606FF8026000842DD2266836
-:10184000D0052F22007DF9782C629DC79064C0706E
-:101850009C108A132B200C2AA0200CBD11A6DD0A97
-:101860004F14BFA809880129D286AF88288C09792E
-:101870008B591FEC520FBF0A2FF2A368F0052822E4
-:10188000007F894729D285D4906590756000430018
-:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
-:1018A000BF0A6E96102FF2A368F00488207F890586
-:1018B00029D285659165DA2058157DC95C6001FFE4
-:1018C00000DA20C0B658157A60000C00C09063FFA3
-:1018D000B50000DA205815766551E48D138C11DBC4
-:1018E000D08DD0022A020D6D515813E39A1364A1D2
-:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
-:10190000FD00C091C0F12820062C2066288CF9A784
-:10191000CC0C0C472C24666FC6098D138DD170DE5C
-:1019200002290A00099D02648159C9D38A102B211A
-:10193000045813F38A13C0B02B24662EA2092AA0E0
-:10194000200E28141CEC298D1315EC1DC1700A778C
-:101950003685562DDC28AC2C9C12DED0A8557CD3C5
-:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
-:10197000D4A028200CB455C0D02B0A882F0A800C84
-:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
-:1019900012DEF0AC882D84CF28120229120378F3CE
-:1019A000022EFDF8289020D3E007880CC1700808AB
-:1019B00047289420087736657FAB891313EC10898C
-:1019C00090C0F47797491BEC0EC1CA2821048513F7
-:1019D000099E4006EE11875304881185520E880235
-:1019E0000C88029BA09FA18F2B9DA598A497A795DB
-:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
-:101A00001106CC082BC2852DE4CF2BBC202BC6851C
-:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
-:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
-:101A300088201EEBE38F138EE48FF40888110A8848
-:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
-:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
-:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
-:101A70002F24722C2502C020D10F871387700707EF
-:101A80004763FD6E282138C099798B0263FE9ADD89
-:101A9000F063FE9500DA20DB308C11DD505815968E
-:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
-:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
-:101AC000C020D10F6C1006292102C0D07597102AB2
-:101AD00032047FA70A8B357FBF052D25020DD90261
-:101AE000090C4C65C18216EBB41EEBB228629EC095
-:101AF000FA78F30260018829E2266890078A2009B3
-:101B0000AA0C65A17A2A629DDFA064A1772B200C24
-:101B10000CBC11A6CC29C286C08C79830260015707
-:101B200019EBA709B90A2992A368900788200988A8
-:101B30000C65814327C2851CEBA964713A89310980
-:101B40008B140CBB016FB11D2C20669F10B1CC0C07
-:101B50000C472C24666EC60260014009FF5065F1F7
-:101B60003A8A102AAC188934C0C47F973C18EBA974
-:101B70001BEBA88F359C719B708B209D7408BB025A
-:101B80009B72C08298751BEBA40F08409B730F8853
-:101B90001198777FF70B2F2102284A0008FF022FA8
-:101BA0002502C0B4600004000000C0B07E97048F1E
-:101BB000362F25227D970488372825217C9736C02B
-:101BC000F1C0900AF9382F3C200909426490861927
-:101BD000EB7618EB7728967E00F08800A08C00F05A
-:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
-:101BF0002AAC182A669D89307797388F338A321835
-:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
-:101C10008498E1882B9DE59AE69FE71AEB78099F67
-:101C20004006FF110FCC020A880298E2C1FC0FCCDB
-:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
-:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
-:101C5000CF5CC020D10FC081C0900F8938C0877978
-:101C6000880263FF7263FF6600CC57DA20DB30DC4A
-:101C7000405813FDC020D10FDA2058148D63FFE8BF
-:101C8000C0A063FE82DA20C0B658148963FFD90071
-:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
-:101CA000045813171EEB44C0D02D246663FEB10008
-:101CB0006C1006D62019EB3F1EEB4128610217EB92
-:101CC0003E08084C65805F8A300A6A5169A3572B29
-:101CD000729E6EB83F2A922668A0048C607AC9343E
-:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
-:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
-:101D00002C160068E0052F62007EF91522D285CFDF
-:101D10002560000D00DA60C0B6581465C85A60012D
-:101D20000F00DA60581462655106DC40DB308D30FC
-:101D3000DA600D6D515812D0D3A064A0F384A1C015
-:101D40005104044763FF6D00C0B02C60668931B157
-:101D5000CC0C0C472C64666FC60270960A2B61048B
-:101D60005812E7C0B02B64666550B42A3C10C0E737
-:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
-:101D8000EB0A18EB0B28967E8D106DDA0500A08803
-:101D900000C08CC0A089301DEB1A77975388328C15
-:101DA000108F3302CE0BC02492E12261049DE00427
-:101DB00022118D6B9BE59FE798E61FEB1009984079
-:101DC0000688110822020FDD02C18D9DE208220261
-:101DD00092E4B4C22E600C1FEB000CE811A7882C13
-:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
-:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
-:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
-:101E1000C0F00ADF387FE80263FF6C63FF600000F8
-:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
-:101E300063C020D10F0000006C10042920062A2264
-:101E40001EC0392C221D232468C0307AC107DDA0B2
-:101E5000600004000000C0D06E9738C08F2E0A804A
-:101E60002B2014C0962924060EBB022E21022B24FF
-:101E7000147E8004232502DE307AC10EC8ABDBD08D
-:101E8000DA202C0A00580B062E21020E0F4CC8FE39
-:101E90006000690068956528210208084C65805C2F
-:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
-:101EB0002668B0048C207BC95329A29D1FEAC16407
-:101EC000904A9390C0C31DEAD52B21049D9608BB70
-:101ED000110CBB029B979B911CEAD2C08523E4A204
-:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
-:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
-:101F00000F8EF912EAC82E2689C020D10FDA20C020
-:101F1000B65813E7C020D10F6C10062A2006941083
-:101F200068A80528ACF965825029210209094C6589
-:101F3000920ACC5FDB30DA208C1058134BC051D39F
-:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
-:101F50008F3A16EA99B1FB64B13128629E6F88020C
-:101F60006001ED294C332992266890078A2009AA3E
-:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
-:101F8000B7110677082972867983026001CD0CB9F2
-:101F90000A2992A36890082C220009CC0C65C1BBC9
-:101FA0002772856471B5282006288CF96481E52C98
-:101FB00020668931B1CC0C0C472C24666EC60260B9
-:101FC00001A109F85065819B2A21048CE488361E02
-:101FD000EA7D088914A9CC08084709881019EA92F3
-:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
-:101FF000CC029C722E302C293013283012049910F8
-:102000000688100CEE109F740EAE0209880208EECE
-:10201000029E738C3704AA119C758938C0F4997696
-:102020008839C0C1987718EA828E359C7B9E780EDD
-:102030008E1408EE029E7A8E301CEA7177E73088A3
-:102040003289339C7C9F7D0E9C4006CC118F2B29BE
-:1020500076132D76112876120CAA0218EA68C1C9E7
-:102060000CAA022A761008FF029F7EC0AA60000117
-:10207000C0A6A4BC0CB911A6992892852DC4CF087E
-:10208000A80B289685655100C020D10F2B200C0C81
-:10209000B7110677082A72860CB90A6FA902600187
-:1020A000182992A36890082A220009AA0C65A109A0
-:1020B0002A728564A1032C203D0C2C4064C08C8CBA
-:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
-:1020D0001464C0777CF374283013C0FC78F86CC0AB
-:1020E00090292467090C4765C0D719EA4718EA45C3
-:1020F0008F208C3508FF110C8C1408FF0288E49F98
-:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
-:102110008809880298A218EA3DA4BC2F72852DC4B4
-:10212000CF2FFC102F76852F210229207208FF0265
-:10213000B2992924722F2502C020D10F00CC57DA82
-:1021400020DB308C105812C8C020D10FC09163FF23
-:102150008FDA20C0B658135663FFE100DA20581317
-:102160005463FFD82B21045811E61EEA152B200CCE
-:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
-:10218000DD505813DDD2A0D10F2A2C748B10580BC0
-:10219000BED2A0D10F292138C08879832E8C310C72
-:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
-:1021B000EA048F3A2B200C63FE0DDA2058133C639F
-:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
-:1021D0002104B1CC0C0C472C24665811C91EE9F817
-:1021E0002B200CC0D02D24668F3A63FDDA0000004E
-:1021F0006C10089514C061C1B0D9402A203DC04080
-:102200000BAA010A64382A200629160568A8052C9D
-:10221000ACF965C33F1DE9EA6440052F120464F27E
-:10222000A02621021EE9E606064C6562E615E9E2F3
-:102230006440D98A352930039A130A990C6490CCEA
-:102240002C200C8B139C100CCC11A5CC9C112CC2F7
-:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
-:10226000A368E0098620D30F0E660C6562C2881150
-:102270002882856482BA891364905EDA80D9308CB2
-:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
-:102290007FB718B88A293C10853608C6110E660229
-:1022A0009681058514A5D50F550295800418146DE7
-:1022B0008927889608CB110888140EBB02A8D82954
-:1022C0009C200F88029BA198A088929BA308881449
-:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
-:1022E000131EE9BD86118D10286285AEDD08FF0B37
-:1022F0002CD4CF2821022F66858B352A207209889D
-:1023000002ABAA2825022A2472C020D10F29529E8E
-:1023100018E9A96F980260020B28822668800829B4
-:10232000220008990C6591FC2A529DC1CE9A126434
-:10233000A1F22B200C2620060CB8110588082D824E
-:10234000860EBE0A7DC3026002052EE2A368E00885
-:102350002F22000EFF0C65F1F6288285D780DE80E3
-:102360006482009816266CF96462012C206688311C
-:102370002CCC010C0C472C24666EC6026001BC08F4
-:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
-:10239000048B2D2830102F211D0C88100BFB090AEF
-:1023A00088020988020CBB026441529B709D71989F
-:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
-:1023C0007FD714273C10BCE92632168C3996E69C40
-:1023D000E78A37B4382AE6080B131464304A2A8295
-:1023E0001686799A9696978C778A7D9C982B821779
-:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
-:10240000086DB9218BC996A52692162AAC18B899E1
-:102410009BA196A08BC786CD9BA22B921596A49BC1
-:10242000A386CB2CCC2026A605C0346BD4200D3B34
-:102430000C0DD8090E880A7FB705C0909988BC8812
-:10244000C0900B1A126DAA069988998B288C18C017
-:10245000D01BE97A1CE97916E96EB1FF2A211C2309
-:10246000E6130F0F4F26E6122F251D7FA906C0F099
-:10247000C08028251D05F6111AE9678F202BE61567
-:102480002CE6162DE61726E6180AFA022AE6142983
-:102490002006299CF96490F829200C8D14C0801A1C
-:1024A000E94E0C9C11AA99A5CCDA202BC285289460
-:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
-:1024C000D10F8A356FA546D8308BD56DA90C8A8679
-:1024D0000A8A14CBA77AB335288C10C080282467C9
-:1024E000080B4765B10BDA20DB302C12055811DEE2
-:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
-:102500006461059B709D719872C04D63FEA4C0818B
-:1025100063FFC9008814CC87DA20DB308C15581192
-:10252000D2C020D10FDA20C0B658126163FFE40098
-:1025300000DA208B1058125E63FFD8009E178A12B3
-:102540002B21045810EF8E17C09029246663FE34A7
-:10255000C08063FE06DA20DB308C15DD505812E6B1
-:10256000D2A0D10FDA2058125263FFA7002B2138D6
-:10257000C0A87BAB026001048C310CFC5064CE041B
-:102580008A122B2104C0C098175810DD8E1763FDE6
-:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
-:1025A00028206A7F87050826416460A3C09016E949
-:1025B000141CE9232A200723E61BB1AA0CFD0226DE
-:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
-:1025D0001C8B260A0A472BE6208B282AE53E2BE691
-:1025E000212924072820062A2064688346B44463EE
-:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
-:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
-:1026100032162B76129D712D761328761489960A20
-:102620002A14AA990C9902997069ED71C14663FD4B
-:102630008100000064AFB51DE8E22C20168DD20A9F
-:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
-:102650002B21046EB81E2C2066B8CC0C0C472C2401
-:1026600066C9C09E178A125810A68E17C0348F20D4
-:10267000C0D02D2466C06826240663FF2E8A122B44
-:1026800021042C20669817B1CC0C0C472C246658DA
-:10269000109C8E178716C0D02D246663FCE68D35FE
-:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
-:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
-:1026C0001027D63006460127D6320A440117E8DF24
-:1026D00024D631A74727D63324F21596B794B68D62
-:1026E000C3BCBB9DB58D35299C107D83C22F211D98
-:1026F000C14663FD330000006C1006292006289CAB
-:10270000F86582BF2921022B200C09094C6590E154
-:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
-:102720000260028C19E8A609B90A2992A3689007E9
-:102730008C2009CC0C65C27829A2856492722D6226
-:102740009E1AE89C6FD80260026E2AA22629160102
-:1027500068A0082B22000ABB0C65B25C29629DC1EF
-:102760008C6492542A21200A806099102C203CC746
-:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
-:10278000260DBD112DDC1C0D0D410EDD038E27B174
-:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
-:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
-:1027B000A16000093E01073EB1780987390B770A0D
-:1027C00077EB0260020A2C2123282121B1CC0C0CCA
-:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
-:1027E000DB30581095292102CC96C0E80E9E022EAF
-:1027F0002502CC57DA20DB30DC4058111BC020D139
-:102800000F2C20668931B1CC0C0C472C24666EC687
-:10281000026001D309FD5065D1CD2F0A012E301180
-:1028200029221464E01128221B090C4400C1040071
-:10283000FA1A0A880228261B2E3010C0A0C0B094B5
-:102840001295131CE85F88302CC022088D147787FE
-:1028500004C0F10CFA38C041C0F225203CC0840805
-:1028600058010F5F010F4B3805354007BB10C0F012
-:10287000084F3808FF100FBB0228ECFEC0F0084FCD
-:1028800038842B0BA8100AFF102A21200F88020B76
-:10289000880208440218E86E8F1108440228212596
-:1028A0000A2A140828140488110A88022A21049488
-:1028B000F08B2004E41008BB1104BB02C04A04BB27
-:1028C000029BF1842A08AB110BEB0294F40A541119
-:1028D0000B44020555100D1B4094F707BB100B5518
-:1028E00002085502C08195F68433C05094F3B19428
-:1028F0008B3295F898F99BF2C080C1BC24261499BC
-:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
-:10291000FE883998FF853525F6108436851324F610
-:10292000118B3784122BF612C0B064C07E893077C9
-:1029300097438D3288332E30108F111CE83109995E
-:10294000400699112CF614C0C42CF6158C2B2DF6CC
-:102950001A28F61B2BF61904A81109880208EE02A2
-:1029600019E827C18008EE0209C90229F6162EF6D9
-:1029700018C09E600001C09A2F200C18E8170CFEAA
-:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
-:1029900085C87F8A268929A7AA9A260A990C090937
-:1029A00048292525655050C020D10F00C09A63FFEB
-:1029B000C6DA2058113F63FE38DA20C0B658113C01
-:1029C00063FE2E0068973C2B9CFD64BE24C020D182
-:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
-:1029E000DC3865CDE063FE098A102B2104580FC442
-:1029F000C0B02B246663FE21DB402A2C745809A248
-:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
-:102A100020D10F006C1004290A801EE80E1FE80E5A
-:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
-:102A3000029ED19CD0C051C07013E80A14E8091856
-:102A4000E8072AB285A82804240A234691A986B853
-:102A5000AA2AB685A98827849F25649FD10F0000E4
-:102A60006C100AD630283010292006288CF9648290
-:102A70009B68980B2A9CF965A1B2022A02580FABF9
-:102A800089371BE7CFC89164520E2A21020A0C4CE9
-:102A900065C2588D3019E7C874D7052E212365E229
-:102AA0009E2F929E1AE7C46FF8026002532AA22654
-:102AB00068A0082C22000ACC0C65C2442A929D64AE
-:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
-:102AD00018E7BC64B0052880217B8B432B200C18A1
-:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
-:102AF0002EE2A368E0052F22007EF9372CC2859CC8
-:102B00001864C2332B212F87660B7B360B790C6F31
-:102B10009D266ED2462C203D7BC740CE5560001EC0
-:102B20002A200CC1B28C205811229A1864A2458D1B
-:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
-:102B4000E06000022E60030EDB0C6EB20EDC700C37
-:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
-:102B6000C1C82D21205810BC8C268B279A160CBB6F
-:102B70000C7AB3348F18896399F3886298F28E6562
-:102B80009EF82D60108A189D1768D729C0D09DA97E
-:102B90002C22182B22139CAB9BAA97A58E667E73C2
-:102BA00002600097CF5860001FDA208B1658108201
-:102BB00065A13863FFBDC081C0908F18C0A29AF98B
-:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
-:102BD0001026C051D6A0C0C02BA0102CA4039B1758
-:102BE0002C1208022A02066B02DF702D60038E177A
-:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
-:102C0000188C148B16ACAC2C64038A268929ABAAC9
-:102C10000A990C9A26886609094829252507880CEF
-:102C200098662F2218A7FF2F261863FE96DA20DB5E
-:102C300030DC40DD50581130D2A0D10FC0302C20F4
-:102C4000668961B1CC0C0C472C24666EC60260000C
-:102C5000D2C03009FD5065D0CA8E6764E0696470E7
-:102C600066DB608C18DF70DA202D60038E170CDDB8
-:102C7000119E10AD6D2DDC201EE7755800F923263E
-:102C800018DA208B16DC402F2213DD50B1FF2F26DF
-:102C900013580FC5D2A0D10F0028203D0848406529
-:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
-:102CB00014CACF7CD32D2AAC10C090292467090DEB
-:102CC0004764DDC5600092002C1208066B022D6C73
-:102CD00020077F028E17DA209E101EE75C58007DC9
-:102CE00063FF9A00C09163FFD1000000655081DA54
-:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
-:102D00000FDA20C0B658106A63FFE000006F95022A
-:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
-:102D2000D2A0D10F8A152B2104580EF52324662832
-:102D30006010981763FF2100DA2058105D63FFAB25
-:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
-:102D50009409A90229250263FF91DB30DC40DD5094
-:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
-:102D70000FC020D10FDA202B200C58107263FF6B8C
-:102D80006C1004282006C062288CF8658125C0508C
-:102D9000C7DF2B221BC0E12A206B29212300A104BD
-:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
-:102DB0002A246B04E4390DCC030CBB012B261B64C5
-:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
-:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
-:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
-:102DF000E71E64B09B8C2B2421040DCC029CB08870
-:102E000020C0C50888110C880298B1882A0844118E
-:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
-:102E20000E9E0825E4CF2DDC282DA6852921020938
-:102E3000094C68941A689820C9402A210265A00BA1
-:102E40002A221E2B221D7AB10265A079C020D10F43
-:102E50002C212365CFDE6000082E21212D21237E29
-:102E6000DBD52B221E2F221D2525027BF901C0B0A8
-:102E700064BFC413E6D02CB00728B000DA20038862
-:102E80000A28824CC0D10B8000DBA065AFE763FF4E
-:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
-:102EA000A08B2008BB1106BB029BA1893499A263A9
-:102EB000FF790000262468DA20DB30DC40DD505842
-:102EC000108ED2A0D10FDA202B200C580FF9C02081
-:102ED000D10F00006C1006073D14C080DC30DB40D1
-:102EE000DA20C047C02123BC3003283808084277C5
-:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
-:102F0000D30F6DDA0500508800308CC0E0C020255A
-:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
-:102F2000220F8940941077F704C081048238C0F1E1
-:102F30000B2810C044C02204540104FD3802520181
-:102F400002FE3808DD10821C07EE100E6E020EDD48
-:102F500002242CFEC0E004FE380AEE100E88020D9A
-:102F600088028DAB1EE69B08D8020E880298B0C07E
-:102F7000E80428100E5E0184A025A125084411084C
-:102F80004402052514045511043402C0810E8E3903
-:102F900094B18FAA84109FB475660C26A11FC0F24D
-:102FA000062614600009000026A120C0F20626149F
-:102FB0000565020F770107873905E61007781008C5
-:102FC000660206550295B625A1040AE611085811B5
-:102FD00008280208660296B7C060644056649053A1
-:102FE000067E11C0F489C288C30B340B96459847FE
-:102FF000994618E6829F410459110E99021FE680F6
-:10300000020E4708D80298420E99029F40C1E00E76
-:10301000990299442FA00CB4380CF91114E66F1ED4
-:10302000E666A4FFAE992E928526F4CF0E880B2873
-:103030009685D10F2BA00C1FE6601CE6670CBE1115
-:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
-:10305000D10FC08005283878480263FEA263FE962F
-:103060006C1006C0C06570F18830C03008871477D6
-:103070008712C0B0C0A619E652299022C030CC9762
-:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
-:103090008225203C0B3F109712831CC070085801FA
-:1030A0000D5D01089738C0800B98380777100488A9
-:1030B00010086802087702C0800D98382D3CFE0881
-:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
-:1030D0000CB8100FDD02053B400EDD029D4089203B
-:1030E000043D100899110D99022D210409A9020827
-:1030F000DD119941872A05B9100D3D020ABB110D5A
-:10310000BB02087702974428212587120828140457
-:103110008811071E4007EE100E99027566092621D8
-:103120001F062614600006002621200626140868C3
-:10313000029B47098802984629200CD2C0C0800C07
-:103140009E111BE6251FE61CAB99AFEE2DE28528EC
-:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
-:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
-:10317000981008770C9FD898D989538F5299119934
-:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
-:1031900088108D1108E70C9751AD8DD7F078DB01C1
-:1031A000B1F79D5397528830C03008871408884083
-:1031B000648ED565BEC963FEBC0000006C1004D7E8
-:1031C00020B03A8820C0308221CAA0742B1E2972F8
-:1031D000046D080FC980C9918575B133A2527A3B3D
-:1031E0000B742B0863FFE900649FECD10FD240D130
-:1031F0000F0000006C100AD6302E3027D950DA406C
-:1032000015E5F02430269A1529160464E00264932B
-:10321000732920062A9CF865A3CE2A2102270A04D6
-:103220000A0B4C65B3978C3074C7052D212365D4E8
-:10323000A0C0A62B0A032C2200580F3664A3B9178E
-:10324000E5DE8E389A1664E3BA2F6027285021C92C
-:10325000F37E8311C2B08C202A200C580F55D7A0C2
-:10326000CDA16004A200C2B08C202A200C580F29E6
-:10327000D7A064A4862F212E8B680FBF360FB90C00
-:103280006F9D54296027D5B06E920528203D7B8F15
-:103290004CDA20DB50C1C42D211F580EEF8B269A2B
-:1032A000189A1989272AAC380B990C7A9353896399
-:1032B000C08099738F6298789F728E659E798D67B2
-:1032C0009D7B8C6695759C7A8E687E53026000B1FA
-:1032D0008B1465B050600038DBF063FFA5008A14E2
-:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
-:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
-:10330000FFE2DA208B18580EAC65A2B163FF9E0075
-:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
-:103320002D16042CA403DC70DA20DB60DF502D6046
-:1033300003C0E09E109D171EE5B90CDD110D6D0850
-:103340002DDC285BFF478E668F678817AF5FA8A8C4
-:1033500028640375FB01B1EE8A189E669F67892673
-:103360008829AA9909880C99268E6808084805EECC
-:103370000C28252515E5939E6865EECC63FEE600D6
-:103380000000C9432F21232B21212FFC010F0F4FB8
-:103390002F25237FBB026003142C20668961B1CCEA
-:1033A0000C0C472C24666EC60260022809FD50658D
-:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
-:1033C00020DB601EE5AB2D6003C08098100CDD1182
-:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
-:1033E0008C202A200C580ECB0AA70265A00FC0B073
-:1033F0002C22002A200C580EC7D7A064AFEFDA2089
-:10340000C1BCC1C82D21208F188E268929AFEE9E00
-:10341000260E990C090948292525580E8FC090C001
-:1034200050C0C288609A191EE566C0A12EE022082D
-:103430008F14778704C0810E8938C0800B93102DBC
-:10344000203C2921200CDC0104DB010929140BA8F4
-:10345000380CA5380D3D401CE57E8B2B08881007E5
-:1034600055100855020533022821250F154003BBCE
-:10347000020CBB0207551005D3100828140ADD11F1
-:103480000488110988020533022921040833029BAC
-:1034900070C0808A201BE57708AA110BAA029A71D6
-:1034A000C0A1852A9376957408931103DD020ADD85
-:1034B000029D778C63C1DC9C738B6298789A799BB0
-:1034C00072232214C0C0B1352526149C7B9D7593B0
-:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
-:1034E00025621B957F2362172376102D62182D7697
-:1034F000112C62192C761264E0B98E6077E73DC01A
-:10350000FE13E53E1DE53FC1818A628B6304951180
-:103510000E9C4006CC110C5502247615085502C0AD
-:10352000802D76148D2B2B761B2A761A287619255A
-:10353000761803DD022D76166000030000C0FA2E17
-:10354000200C19E52518E51CA9E90CEE11A8EEC020
-:10355000802DE2852894CF0DFD0B2DE685DA208B9A
-:10356000198C158D14580D90D2A0D10FDC70DF503E
-:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
-:103580005563FE53002B203D0B4B4065BC826FE51D
-:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
-:1035A000F3162AAC10C090292467090F4764FC6009
-:1035B00060015F00C0FA63FF85C09163FFE8881473
-:1035C000658168DA20DB608C15580DA7C020C0909B
-:1035D00029A403D10F8A162B2104580CC9C0A02A94
-:1035E00024668E6863FDCA00002B9CF965B0FDDA85
-:1035F00020580CCE63FC220000DA20C0B6580E2CF6
-:1036000063FFBA002B200C0CBE11A7EE2DE286C181
-:10361000C27DC30260011819E4E909B90A2992A31D
-:103620006890082A220009AA0C65A10326E2856495
-:1036300060FD2C20668931B1CC0C0C472C24666FC0
-:10364000C60270960C8A162B2104580CADC0D02DE2
-:1036500024668E3077E74D1CE4E91BE4E98F32885D
-:1036600033C0A42D21040E994006991104DD1109DF
-:10367000DD029A61C19009DD029B60C0908B2B9D99
-:10368000649F66986799650CBB029B6228200C1AA0
-:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
-:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
-:1036B0008B142C2523C8B7022A02066B02580CDE95
-:1036C0002A210265AEF7C0D80DAD022D250263FE9A
-:1036D000EC008E14C8E8DA20DB30580CD72A21021F
-:1036E00065AEDA07AF022F250263FED100DA20DBD8
-:1036F000308C158D14580E80D2A0D10FDA202B20DB
-:103700000C580DEB63FEB600DA202B200C580E0D82
-:1037100063FEAADA20DB308C152D12042E0A8028D5
-:103720000A00282468580CD663FAE500C020D10F9F
-:10373000DA20580DDF8914CD92DA20DB308C155851
-:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
-:103750000F2A2C748B1558064CD2A0D10F000000F4
-:103760006C100E28210224160108084C6583A91F3D
-:10377000E49229F29E6F98026003AD1EE48E29E266
-:10378000266890082A220009AA0C65A39B24F29DB2
-:103790006443952A31160A4B412B240BB4BB0B0B07
-:1037A000472B240C0CB611AF66286286C1CC78C3B7
-:1037B0000260037F19E48209B90A2992A36890077D
-:1037C0008C2009CC0C65C36B276285647365293135
-:1037D00009C0D02D24668C3599139C2A88369C14F8
-:1037E000982B8E3798159E169E2C8C38C0E10C5C59
-:1037F000149C179C2D88392925042E251D28251C4D
-:103800002C3028C0822C243C2930290C0C4708C8B5
-:103810000129243D29311598189912090841089960
-:103820000C299CEC29251F7EC725921C8212282A70
-:1038300000082060991B01023E00093EB128098260
-:1038400039891B0E221102990C821C29251F821C0A
-:10385000941D951E24211F15E4880451609A10C1FF
-:10386000802B1610252014961F05054301063E00E7
-:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
-:10388000441C893D2E26132E26142E26152E246B1D
-:1038900025241406D61CC05025261825261B2524B1
-:1038A000672524682832112525232525242525254B
-:1038B00025252C2925222D25202B252124252E26A2
-:1038C000252F14E46F16E46D1BE45298192D211C6A
-:1038D000C08498719B70892095759577957F967CAB
-:1038E000967E98799B7894731BE46714E4680C388F
-:1038F000400288100C064015E464016610947D9B1C
-:1039000074841D1BE444086602957B18E431851E0F
-:103910000B99029972997A0866022B121096768694
-:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
-:1039300000A10400E81A7D8B0263FFEE891AC0E043
-:10394000961F1DE43E2B1610951E941D28203D2920
-:10395000761A297612C040C051C0B22D76130806DF
-:10396000408D170B8801065E380AEE101BE44A08EA
-:103970005438B0A609661188140B44102B761B042A
-:10398000EE028B1614E44308DA1406EE020D8810DA
-:103990002A761E86131AE41C04EE020D66110866D0
-:1039A000022E76160D14141EE41A0D44110BD814B1
-:1039B0000866020A44022E76182E76102476172600
-:1039C000761FC084287619287611C76F0C24400F03
-:1039D00044111CE3FB26761D26761C2676152676DA
-:1039E000148A262676242676252976222E762028E5
-:1039F00076218E1888150DB91016E4278BC70D880F
-:103A0000110E5E39ADBB851904EE022676230988B6
-:103A100002861F89102876260A04480544110505E8
-:103A2000480E551105440204EE02851E841D2E76B3
-:103A3000272820069B2D29246A2E31172B12102EA1
-:103A40002538CC83C0D02D2407C0D7090840648016
-:103A50008E9A290928416480AA64E0B42D2406C006
-:103A60009809E9362D0AA02A628501C404ADAA2D61
-:103A700021042A668508DD11883F8E3E2732100812
-:103A8000EA1800C40408E8180088110ECE5308771D
-:103A900002C08308DD029D4118E401090D4E9840E3
-:103AA00088209A4397449D4517E3FE1DE3CB058884
-:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
-:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
-:103AD00097CA28A4A268711C655060C020D10F004D
-:103AE0002D2406C080C09809E9360E893863FF731B
-:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
-:103B0000D600000065EF54C098C0D82D240663FF8E
-:103B1000522D2406C09063FF4ACC57DA20DB308C4C
-:103B200011580C51C020D10F00DA20C0B6580CE05B
-:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
-:103B400011580551D2A0D10F6C10062820068A33D7
-:103B50006F8202600161C05013E39729210216E3CE
-:103B600096699204252502D9502C20159A2814E331
-:103B7000948F2627200B0AFE0C0477092B712064F2
-:103B8000E1398E428D436FBC0260016F00E104B0E9
-:103B9000C800881A08A80808D80298272B200668A9
-:103BA000B32ECE972B221E2C221D0111027BC901A0
-:103BB000C0B064B0172CB00728B000DA2003880A20
-:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
-:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
-:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
-:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
-:103C0000007E894F29C2851EE37E6490461FE38CA7
-:103C10009E90C084989128200A95930F88029892CC
-:103C20008E200FEE029E942F200788262F950A984B
-:103C3000969A972E200625240768E3432921022A15
-:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
-:103C500063FF4E002E2065CBEDC082282465C9F697
-:103C600005E4310002002A62821BE36D2941020B48
-:103C7000AA022A668209E43129210263FF23000097
-:103C800064DFB88F422E201600F1040DEE0C00EE1A
-:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
-:103CA000B0293221283223B4992936217989A92BC8
-:103CB00032222B362163FFA0C020D10F9F2725245D
-:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
-:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
-:103CE000D0868E290EAE0C66E089C0F128205A28B5
-:103CF0008CFE08CF3865FEE863FF580000E00493AF
-:103D000010C0810AF30C038339C78F08D80308A8B1
-:103D10000108F80C080819A83303C80CA8B82875BE
-:103D200020030B472B24158310CBB700E104B0BC54
-:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
-:103D4000990209094F29250263FE50002D206A0DB2
-:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
-:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
-:103D7000FF79000028221F658E2763FF6E25240629
-:103D800029210263FE1B00006C10066571332B4C69
-:103D900018C0C7293C18C0A1C08009A8380808422B
-:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
-:103DB0000F6DAA0500B08800908C8940C0A00988CA
-:103DC000471FE32B080B47094C50090D5304DD1026
-:103DD000B4CC04CC100D5D029D310CBB029B30882D
-:103DE000438E2098350FEE029E328D26D850A6DDE8
-:103DF0009D268E40C0900E5E5064E0971CE3111E1D
-:103E0000E300038B0BC0F49FB19EB02D200A99B341
-:103E10000CDD029DB28F200CFF029FB48E262D2058
-:103E2000079EB68C282DB50A9CB72924072F20069B
-:103E30002B206469F339CBB61DE2E22320168DD224
-:103E40000B330C00D10400331AB48DA3C393292281
-:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
-:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
-:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
-:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
-:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
-:103EA000AFEEACBB22B28529E4CF02820B22B685ED
-:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
-:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
-:103ED00018DD50580A9B8940C08063FEE3066E02DD
-:103EE000022A02DB30DC40DD505800049A10DB501F
-:103EF000DA70580465881063FEF700006C100692B3
-:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
-:103F10000BD9A07DA30229ADF875C302600084C04F
-:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
-:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
-:103F4000D8F000808800708C879068B124B2227706
-:103F5000D3278891C0D0CB879890279C1000708879
-:103F600000F08C9D91CB6FC08108BB0375CB36638D
-:103F7000FFB4B1222EEC1863FFD485920D770C8626
-:103F8000939790A6D67D6B01B1559693959260005C
-:103F900016B3CC2D9C188810D9D078D3C729DDF85A
-:103FA00063FFC100C0238A421BE2C000CD322D4412
-:103FB000029B3092318942854379A1051EE2BC0EF5
-:103FC000550187121BE2AB897095350B9902993226
-:103FD00088420A880C98428676A6A696768F44AFC9
-:103FE000AF9F44D10F0000006C10089311D63088A9
-:103FF00030C0910863510808470598389812282165
-:1040000002293CFD08084C6581656591628A630A56
-:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
-:104020005ACCC42D0A022D245A7FE0026002158961
-:104030002888261FE29F09880C65820F2E200B0F0F
-:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
-:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
-:1040600080084837B88DB488981089601AE2557B6B
-:1040700096218B622AA0219C147BA3179D132A20D2
-:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
-:104090006001C4002E200C1BE2480CEA110BAA0898
-:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
-:1040B000F0052822007F892C2BA28564B0AA876294
-:1040C0008826DE700C7936097A0C6FAD1C8F279B21
-:1040D0001508FF0C77F3197E7B729D139C149B15BA
-:1040E000CF56600025C0B063FFD0D79063FFDD00DE
-:1040F000009D139C14DA20DB70580B2F8B158C1449
-:104100008D1365A06A8E6263FFCC00DA208B11DC10
-:1041100040580AD5D6A08B15C051DE70DA20DC607D
-:10412000DD405BFF768D138C14D9A02E200C1BE292
-:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
-:104140002EF4CF0B990B29A68563FF1D00DA20DC26
-:1041500060DD40DE708912282007DF50A9882824FE
-:10416000075BFF09D2A0D10F00DBE0DA20580B502B
-:104170006550EF2A20140A3A4065A0EBDB60DC4072
-:10418000DD30022A025809BCD6A064A0D584A183E0
-:10419000A00404470305479512036351C05163FE11
-:1041A0005C2C2006D30F28CCFD6480A568C704C012
-:1041B000932924062C2006C0B18D641FE2019D279F
-:1041C0009D289D298FF29D2600F10400BB1A00F066
-:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
-:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
-:1041F000BB36C0E20B0B470EBB372B241618E1F978
-:104200000A09450D0B422B240B29240AB4BE2E2487
-:104210000C7D88572920162FCCFDB09D0A5C520DCD
-:10422000CC362C246465FDEC0C0C4764CDE618E11B
-:10423000E48E2888820C9F0C00810400FF1AAFEEE8
-:104240009E2963FDCF1CE21163FE13001CE20B6389
-:10425000FE0C8D6563FFA500DA202B200C580B396E
-:10426000645F0FC020D10F00C020D10FC09329245C
-:1042700016C09363FFA000006C1004C06017E1CD6E
-:104280001DE1D0C3812931012A300829240A78A1EF
-:1042900008C3B27BA172D260D10FC0C16550512654
-:1042A00025022AD0202F200B290AFB2B20142E2098
-:1042B0001526241509BB010DFF0928F1202B241414
-:1042C000A8EE2EF52064A0A92B221E28221D011184
-:1042D000027B8901DB6064B0172CB00728B000DADC
-:1042E0002007880A28824CC0D10B8000DBA065AF74
-:1042F000E7DB30DC40DD50DA205800DE29210209FE
-:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
-:10431000372ED02064E02D022A02033B02DC40DD70
-:10432000505800D4D2A0D10F2B2014B0BB2B241492
-:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
-:1043400002D2A0D10FC020D10F2E200669E2C126D3
-:1043500024062B221E2F221D29200B2820150D9903
-:10436000092A9120262415AA882895207BF14960E6
-:104370000048B0BB2B24140B0A4164A0627CB70236
-:104380002C25022B221E2C221DD30F7BC901C0B06D
-:10439000C9B62CB00728B000DA2007880A28824C5A
-:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
-:1043B000262406D2A0D10F0000DB601DE18164BF7E
-:1043C0004F2CB00728B000DA2007880A28824CC09A
-:1043D000D10B8000DBA065AFE71DE17963FF310001
-:1043E00026240663FF9C00006C1004282006260A81
-:1043F000046F856364502A2920147D9724022A02C1
-:10440000DB30DC40DD50580019292102090A4CC874
-:10441000A2C020D10FC0B10B9B022B2502C020D11E
-:104420000F00022A02033B022C0A015800D1C9AA3C
-:10443000DA20DB30DC40580A0C29A011D3A07E978B
-:10444000082C0AFD0C9C012CA411C0512D2014062F
-:10445000DD022D241463FFA4DA20DB30DC40DD50C4
-:10446000C0E0580987D2A0D10F0000006C100616DA
-:10447000E1521CE152655157C0E117E14E2821027B
-:104480002D220008084C6580932B32000B695129BE
-:104490009CFD6590872A629E6EA84C2A722668A0B1
-:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
-:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
-:1044C0002FF2A368F0052822007F89072DD285D31B
-:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
-:1044E00020580883600035002D21041BE1757DBB39
-:1044F00024DA20C0B658087ECA546001030B2B5042
-:104500002B240BB4BB0B0B472B240C63FFA0DA202E
-:10451000580A67600006DA20C0B6580A656550E0A0
-:10452000DC40DB302D3200022A020D6D515808D2DA
-:104530001CE123D3A064A0C8C05184A18EA00404B0
-:10454000470E0E4763FF3500002B2104C08B8931D5
-:10455000C070DF7009F950098F386EB8172C2066CB
-:10456000AECC0C0C472C24667CFB099D105808E44B
-:104570008D1027246694D11EE126B8DC9ED06550AC
-:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
-:10459000F119E10518E10728967EB04BD30F6DBAEB
-:1045A0000500A08800C08C2C200CC0201DE10B0C45
-:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
-:1045C000F685D10FC0800AB83878D0CD63FFC1001E
-:1045D0008E300E0E4763FEA12A2C742B0A01044D67
-:1045E000025808D72F200C12E0FC0CF911A699A252
-:1045F000FF27F4CF289285D2A008480B289685D1B2
-:104600000FC020D10F0000006C1004C060CB55DB40
-:1046100030DC40055D02022A025BFF942921020979
-:10462000084CC882D2A0D10F2B2014B0BB2B24146D
-:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
-:10464000D2A0D10F0000022A02033B02066C02C076
-:10465000D0C7F72E201428310126250228240A0F5E
-:10466000EE012E241458010E63FFA300262406D267
-:10467000A0D10F006C1006282102D62008084C6536
-:10468000809D2B200C12E0CC0CB811A2882A8286C7
-:10469000B5497A930260009719E0C909B90A2992CD
-:1046A000A36890082A620009AA0C65A08228828566
-:1046B0001CE0D46480799C80B887B14B9B819B10AF
-:1046C000655074C0A7D970280A01C0D0078D380D75
-:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
-:1046E0000F6D4A0500808800908C2E3008C0A00015
-:1046F000EE322E740028600C19E0B80C8D11A2DD8A
-:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
-:10471000D685D10FC0F0038F387FA0C063FFB400EF
-:10472000CC582A6C74DB30DC4058080BC020D10F09
-:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
-:104740007058087F2E30088B1000EE322E7400282F
-:10475000600C19E0A10C8D11A2DDA988C0202CD21B
-:10476000852284CFD2A00CBC0B2CD685D10F0000A3
-:104770006C1004292014282006B19929241468817A
-:1047800024C0AF2C0A012B21022C24067BA004C0DC
-:10479000D02D2502022A02033B02044C02C0D0584D
-:1047A00000C0D2A0D10FC020D10F00006C1004298E
-:1047B0003101C2B429240A2A3011C28378A16C7B4A
-:1047C000A1696450472C2006C0686FC562CA572D86
-:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
-:1047E000292102090E4CC8E2C020D10FC0F10F9F51
-:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
-:10480000DC28201406880228241463FFC7292015F9
-:104810001BE06C2A200BC0C09C240BAA092BA120F2
-:104820002C2415AB9929A52063FF9900C020D10F36
-:10483000DA20DB30DC40DD50C0E0580891D2A0D156
-:104840000F0000006C1004CB5513E06725221F0DEC
-:10485000461106550CA32326221E25261F06440BAF
-:1048600024261E734B1DC852D240D10F280A80C087
-:104870004024261FA82828261E28261DD240D10FF6
-:10488000C020D10F244DF824261E63FFD80000005D
-:104890006C1004D620282006C0706E85026000D4FB
-:1048A0001DE04E19E04612E0442A8CFC64A1302B36
-:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
-:1048C000B8110288082E828609B90A7EC3026000E8
-:1048D0009A2992A368900509AA0C65A08E28828562
-:1048E000648088B8891BE04A94819B80655155C0DB
-:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
-:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
-:104910000500808800908CC0A029600C0C9C11A21E
-:10492000CC2BC285AD990B4B0B2BC6852860062777
-:1049300094CF6881222D6015D2A0C9D2C0E22E6426
-:1049400006D10F00C0F008AF387FB0BD63FFB100E3
-:10495000276406D2A0D10F00D2A0D10F00CC57DA25
-:1049600060DB30DC405808C0C020D10FDA60580945
-:104970005063FFE80028221E29221DD30F789901D9
-:10498000C080C1D6C1C11BE018C122AB6B6480429C
-:1049900078913F2A80000CAE0C64E0BB02AF0C643F
-:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
-:1049B000E864E0A32FACE764F09D2EACE664E097DA
-:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
-:1049D00065AFBC28612308D739D97060007B00001F
-:1049E0002B600C0CB811A2882C82862A0A087CAB9A
-:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
-:104A00002A828564A0691FDFFE276504C0E3C0C455
-:104A10002E64069CA11CE02B9FA02E600A97A30C7D
-:104A2000EE029EA28F600CFF029FA42E60147AEF0C
-:104A30004627A417ADBC2F828527C4CF2FFC202F7B
-:104A4000868563FE692A6C74C0B1DC90DD4058072E
-:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
-:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
-:104A7000DA6058090F63FEE4290A0129A4170DBF63
-:104A8000082E828527F4CF2EEC202E868564500BCD
-:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
-:104AA0006C10062B221E28221D93107B8901C0B09A
-:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
-:104AC0000747010E4E01AD2D9E11C0402E0A146401
-:104AD000B06E6D084428221D7B81652AB0007EA13E
-:104AE0003B7FA1477B51207CA14968A91768AA1484
-:104AF00073A111C09F79A10CC18B78A107C1AE2908
-:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
-:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
-:104B200089116987BB649FB863FFDC00647FB4634D
-:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
-:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
-:104B5000107CB1217AB901C0B0C9B913DF96DA204F
-:104B600028B0002CB00703880A28824CC0D10B80E3
-:104B700000DBA065AFE7D240D10F8910659FD463F9
-:104B8000FFF300006C1008C0D0C8598C30292102F6
-:104B90000C0C4760000C8E300E1E5065E19E2921E2
-:104BA00002C0C116DF85090B4C65B0908A300A6ED1
-:104BB0005168E3026000852F629E1BDF7E6EF85312
-:104BC0002BB22668B0052E22007BE94727629DB7ED
-:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
-:104BE000F2869E12798B4117DF7507B70A2772A3E9
-:104BF000687004882077893029F285DF90D7906526
-:104C000090652A210419DFAE7A9B22DA205806B873
-:104C1000600029002C21041BDFAA7CBB18DA20C00D
-:104C2000B65806B3C95860014CC09063FFCCDA2077
-:104C300058089F600006DA20C0B658089D655135B7
-:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
-:104C5000D3A064A120292102C05184A18CA0040406
-:104C6000470C0C4763FF3E00C09B8831DBD008F83F
-:104C700050089B3828210498116E8823282066ACA0
-:104C80008C0C0C472C24667CBB159F139E148A1039
-:104C90008B1158071B8E148F13C0D02D24668A30B9
-:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
-:104CB0000827FC106550A4B83ADF70C051C08007C7
-:104CC000583808084264806718DF3819DF392986A8
-:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
-:104CE000A08930B4E37F9628C0F207E90B2C940822
-:104CF0009B909F912F200C12DF380CF811A6882969
-:104D00008285A2FF2DF4CFD2A009330B238685D153
-:104D10000F22200C891218DF300C2B11A6BBA82201
-:104D20002D24CF2CB285D2A00C990B29B685D10F9A
-:104D3000C087C0900A593879809663FF8ADB30DAE1
-:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
-:104D500065AE4D2D2502C09063FE45009E142A2CA1
-:104D600074C0B1DC70DD405806F68E14C0D01BDF75
-:104D700028C1C863FF6AC020D10F00006C1006284C
-:104D8000210217DF0D08084C65824929729E6F9831
-:104D90000260025019DF082A922668A0078B200AB9
-:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
-:104DB000C0602B3008C0F164B0712E0AFFB0B86437
-:104DC00081512DBCFE64D0F364505C2A2C74044BDA
-:104DD000025800AD0AA2020600000000001ADF0817
-:104DE0002C20076EBB0260022218DEFE13DF081BB8
-:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
-:104E000099D223B08026B480B13308330293D318EB
-:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
-:104E2000EC0B2CD685655FA2C020D10F2B21048806
-:104E300031DE6008F85008CE386EB8102C2066B10C
-:104E4000CC0C0C472C24667CEB026001AF2E30109A
-:104E50002930112C301300993200CB3264E1452AFD
-:104E600030141EDF1A00AA3278CF050E9C092BC41D
-:104E70007F1CDF1766A0050E98092A8480B4A71846
-:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
-:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
-:104EA0003303AC882A848B2CD03627848C03CC0126
-:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
-:104EC000FB11A7BB2DB286C0987D9302600121190A
-:104ED000DEBB09F90A2992A36890082D220009DD9A
-:104EE0000C65D10C2DB285DE6064D10488312B2194
-:104EF0000408F85008CE386FB80263FEDF2C206635
-:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
-:104F100060013100293108292504283014B0886443
-:104F200080A62B31092B240AC0812B30162FD423C5
-:104F30002B240BB4BC2C240C8D378B36292504DE96
-:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
-:104F50009C1101C4048F380DBE1800C4040DB8188C
-:104F600000881108FF02C08308CC0218DECC9CA187
-:104F700098A018DECB8C209EA39FA405CC110BCF4C
-:104F800053C1E09EA50CFF0208FF029FA218DE8914
-:104F90002624662C729D2684A22CCC182C769D6328
-:104FA000FE250000002D30121CDECD00DA3278DF45
-:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
-:104FC0002A301100AA3263FEEC2E240A2B31099BF1
-:104FD0002B63FF5300CC57DA20DB30DC405807222C
-:104FE000C020D10F00DA20C0B65807B163FFE5003A
-:104FF00000DBF0DA205807AE63FFD9000058064006
-:105000001DDE70C0F126246663FE41008B20280A55
-:10501000FFB1CE23200A2C21040E0E472E24077840
-:1050200031359AD02CD50A96D319DEA62ED416C0C7
-:105030008398D1C0E309B80298D409390299D226DD
-:10504000240763FDC958062E8D102624662B2104E3
-:105050002F200C63FD86000008B81119DE6808EEE9
-:1050600002882B9ED59AD0C0EF09880298D204C935
-:10507000110E990299D4C0E49ED163FFC1000000D3
-:105080006C1004C020D10F006C100485210D381164
-:1050900014DE478622A42408660C962205330B935F
-:1050A00021743B13C862D230D10FC030BC29992182
-:1050B00099209322D230D10F233DF8932163FFE34F
-:1050C0006C100AD620941817DE3CD930B8389819DD
-:1050D0009914655256C0E1D2E02E61021DDE390EF0
-:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
-:1050F000F1558EE129D0230E8F5077E66B8F181E65
-:10510000DE78B0FF0FF4110F1F146590CE18DE7516
-:105110008C60A8CCC0B119DE2728600B09CC0B0D20
-:10512000880929812028811E2A0A0009880C08BACA
-:10513000381BDE6B0CA90A2992947B9B0260008CC1
-:105140002B600C94160CBD11A7DD29D286B84879C6
-:1051500083026000D219DE1909B80A2882A39817C1
-:105160006880026000A36000A51ADE5F84180AEE62
-:1051700001CA981BDE108C192BB0008CC06EB313C3
-:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
-:10519000AE6000380C0C5360000900000018DE51AE
-:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
-:1051B000880929812028811E2A0A0009880C08BA3A
-:1051C000380CA90A2992947E930263FF72DA60C0B8
-:1051D000BA58073764507360026A00001ADDF68C13
-:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
-:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
-:1052000063FFC9000C0C5363FF0989607899182962
-:10521000D285C9922B729E1DDDE76EB8232DD22652
-:10522000991369D00B60000DDA60580721600017F0
-:105230000088607D890A9A1A29729D9C129915CF5F
-:1052400095DA60C0B658071A6551F98D148C18DBD1
-:10525000D08DD0066A020D6D51580587D3A09A14DF
-:1052600064A1E182A085A1B8AF9F1905054702029C
-:10527000479518C05163FE602B6104C08B8931C013
-:10528000A009F950098A386EB81F2C6066A2CC0CB0
-:105290000C472C64667CAB119F119E1B8A15580528
-:1052A000988E1B8F11C0A02A64669F1164F0E58957
-:1052B0001388190FFD022E0A006DD9172F810300E4
-:1052C000908DAEFE0080889F9200908C008088B800
-:1052D0009900908C65514E8A10851A8B301FDDC85D
-:1052E000881229600708580A2C82942D61040ECC7C
-:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
-:105300005D50A29909094729C48065D0DA2E600C46
-:10531000C0D01FDDB10CE811AFEEA7882282852D29
-:10532000E4CF02420B228685D2A0D10F8E300E0E22
-:105330004763FDA2A29C0C0C472C64077AB6CD8B68
-:10534000602E600A280AFF08E80C64810E18DDDD73
-:1053500083168213B33902330B2C34162D350AC051
-:105360002392319F30C020923308B20208E80292A3
-:10537000349832C0802864072B600CD2A01CDD96C4
-:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
-:105390002DE685D10F8B1888138D30B88C0D8F4773
-:1053A0000D4950B4990499100D0D5F04DD1009FFEB
-:1053B000029F800DBB029B8165508D851AB83AC053
-:1053C000F1C0800CF83808084264806B1BDD771947
-:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
-:1053E000C08CC0A063FEF30082138B161DDD8828DD
-:1053F000600AC0E02EC4800D880202B20B99239F80
-:1054000020C0D298229D2122600CB2BB0C2D11A786
-:10541000DD28D28508BB0B18DD702BD685A8222E7F
-:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
-:10543000FF168E1B63FEA300C087C0900AF938795F
-:10544000809263FF86C020D10F9E1B2A6C74C0B16E
-:105450008D1858053B8E1B851A63FE7E886B821360
-:10546000891608BE110ECE0202920B9E25B4991E1B
-:10547000DD639F200E88029822C0EF04D8110E88A9
-:10548000029824C0E49E21C080D2A02B600C286426
-:10549000071CDD510CBE11A7EE2DE285ACBB28B474
-:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
-:1054B00020D10F006C10048633C071C03060000131
-:1054C000B13300310400741A0462017460F1D10F29
-:1054D0006C1004022A02033B025BFFF61CDD391B41
-:1054E000DD83C79F88B009A903098A019AB0798032
-:1054F0001EC0F00FE4311DDD300002002BD2821EF1
-:10550000DD7C2AC1020EBB022BD6820AE431D10F08
-:1055100028C102C19009880208084F28C50208E482
-:1055200031D10F006C1004C0C00CE43112DD251A1B
-:10553000DD2200020029A28218DD701BDD6E26210B
-:10554000020B990108660129A68226250206E4318C
-:1055500014DD6B15DD66236A9023261685502426FC
-:1055600015252617222C50D10F0000006C1008D6EC
-:10557000102B0A64291AB41ADD0F0D23111CDD103B
-:105580000F2511B81898130E551118DD5DAC55A8EC
-:1055900038AA332C80FF2A80FEA933288D01298068
-:1055A0000108AA112880000CAA02088811098802A3
-:1055B00008AA1C288C0828160458086814DD010A5B
-:1055C000A70224411A2A30802B120407AA2858085F
-:1055D00063B1338B13B4559A6004AC28B4662C566F
-:1055E0002B7B69E016DD3A9412C050C0D017DCF472
-:1055F0009D15D370D4102F60802E60829F169E1749
-:10560000881672891A8D128C402A607F0DCC282B47
-:105610003A200CAA28580851C0B10ABE372E354886
-:105620008F1772F91A8D128C402A60810DCC282BAD
-:105630003A200CAA28580849C0B10ABE372E354A6C
-:10564000B233B444B1556952B6B466C0508F15B880
-:1056500077D370B2FF9F156EF899D10F6C1004C00C
-:1056600021D10F006C1004270A001CDCD31FDCE4DE
-:105670001EDCE71DDCD01ADD141BDD22C02824B09F
-:10568000006D2A75AA48288080C09164806100411D
-:105690000415DCCBC03125503600361A06550105FD
-:1056A00095390C56110C66082962966E974D0D5966
-:1056B0000A29922468900812DD0602420872993B7A
-:1056C00023629512DCC8CB349F300282020E440262
-:1056D000C092993194329233AD52246295C0902495
-:1056E0004C1024669524B0002924A0AA42292480C5
-:1056F000B177B14404044224B400D10FD10FD10FCB
-:105700006C10041ADCAC2AA00058021C5BFFD50206
-:105710002A02033B025BFFD11BDCAAC9A12CB10208
-:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
-:10573000C0A00AE43118DCA00002002F828219DC2C
-:10574000B32EB10209FF022F86820EE431D10F0081
-:105750006C1004C02002E43114DC9A16DC970002BD
-:1057600000226282234102732F0603E431C020D15C
-:105770000F19DCE61ADCE52841020A2A0109880132
-:105780002A668228450208E43115DCDC12DCE125BA
-:105790004621D10F6C1004292006289CF96480A0B2
-:1057A0002A9CFD65A0968A288D262F0A087AD9049E
-:1057B0002B221FC8BD2C206464C0812E22090EAE8E
-:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
-:1057D000C28619DC7A78F3026000AD09B90A299211
-:1057E000A36890082E220009EE0C65E09B29C28573
-:1057F0001FDC846490929F90C0E41FDC919E9128EE
-:10580000200AC0E09E930F8802989288200F880299
-:1058100098942F20079A979D962F950A2E24072853
-:10582000200629206468833328C28512DC6B288C0B
-:1058300020A2B22E24CF28C685C020D10FC020D1EF
-:105840000F2A206A0111020A2A4165AF52DA20C0EC
-:10585000B05805EA64AFE5C021D10F00649FC81FAE
-:10586000DC582D20168FF209DD0C00F10400DD1A42
-:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
-:105880008C2028C685C020D10FC021D10F00000078
-:105890006C1004260A001BDC9F15DC4928206517C4
-:1058A000DC46288CFE6480940C4D110DBD082CD272
-:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
-:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
-:1058D000F52AD6F406E4310002002872822AFAFF83
-:1058E000004104290A012F510200991A0A9903095B
-:1058F00088012876820FE4312624652BD2F48E5C51
-:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
-:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
-:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
-:10593000020B0B4F2B55020BE431D10F00DB30DA99
-:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
-:10595000FFA8000006E4310002002F728218DC303C
-:105960002E510208FF022F76820EE431D10F000083
-:105970006C1004C03003E43116DC1015DC11000299
-:105980000024628274472118DC64875C084801287F
-:105990006682CD7319DC620C2A11AA99229283299E
-:1059A00092847291038220CC292B51020BE431C0E6
-:1059B00020D10F001FDC5B2E51020FEE012E55028D
-:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
-:1059D000561DD10F6C10061BDBF71EDBF922B00041
-:1059E0001ADC526F23721DDC39C04818DC511FDCF1
-:1059F0004FDC10D5C083F000808600508A6D4A4F7E
-:105A00000F35110D34092440800B560A296294B1D8
-:105A1000330E55092251480F44110C440A8740099E
-:105A2000A80C02883622514907883608770CA899B5
-:105A30002966949740296295874109A80C02883607
-:105A400007883608770CA899296695974103034281
-:105A5000B13808084298F0D10F1CDC3613DC372728
-:105A6000B0002332B5647057C091C0D016DC351534
-:105A7000DC33C0402AC00003884328C4006D793C51
-:105A8000004104B14400971A7780148E502FB295CC
-:105A90002DB695AFEE2EED2006EE369E5060001826
-:105AA00077A00983509D5023B69560000223B295DC
-:105AB000223D2006223622B695B455B8BBD10F0040
-:105AC00003884328C400D10F6C1004C04004E431A3
-:105AD00015DC1D000200885013DC1CCB815BFFBD70
-:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
-:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
-:105B0000990C0929146000050BA90C092914993076
-:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
-:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
-:105B3000D10F000004E4311EDBA00002002DE28240
-:105B40002FBAFF2C51020FDD012DE6820CE431D17A
-:105B50000F0000006C1004D10F0000006C1004C096
-:105B600020D10F006C100413DBFAC0D103230923EA
-:105B7000318FC0A06F340260008D19DB8F1BDB906A
-:105B800017DBF30C2811A8772672832572822CFA72
-:105B9000FF76514788502E7285255C0425768275E4
-:105BA000E9052572842576827659292E72842E760F
-:105BB000822E76830AE431000200239282002104BF
-:105BC0002FB10200D61A0C66030633012396820F0A
-:105BD000E43126728325728260000200D8A07659D3
-:105BE000220AE43100020023928200210400D21A2A
-:105BF0002FB1020C22030232012296820FE431D22D
-:105C000080D10F00D280D10FC020D10F6C1004DBE7
-:105C100030862015DB68280A00282502DA2028B003
-:105C2000002CB00705880A28824C2D0A010B800041
-:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
-:105C4000769101D10F2BA6A3D10F00006C1004C0D8
-:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
-:105C60007786758574C0A076516288508E77B4555A
-:105C7000957475E903857695747659278F769F75A7
-:105C80009F740AE431000200239282B42E2FB102E5
-:105C900000E10400D61A0C66030633012396820F36
-:105CA000E431867583747639280AE4310002002EC7
-:105CB0009282B42200210424B10200DF1A0CFF03F7
-:105CC0000FEE012E968204E431D280D10FD8A07657
-:105CD00051D6D280D10F00006C1004290A801EDB3F
-:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
-:105CF000B2850FCC029ED19CD0C051C07013DB592D
-:105D000014DB5818DB562AB285A82804240A234637
-:105D100091A986B8AA2AB685A98827849F25649F59
-:105D2000D10F00006C100419DB8B0C2A11A9A98972
-:105D300090C484798B761BDB79ABAC2AC2832CC2EE
-:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
-:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
-:105D6000C94AA929299D0129901F68913270A6036B
-:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
-:105D8000FFB3D230D10F000013DB7503A3018C31B8
-:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
-:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
-:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
-:105DC00028300022300708481209880A28824CDC53
-:105DD000200B80001BDAF90C4A11ABAA29A2840916
-:105DE000290B29A684D10F006C1004C04118DAF2E7
-:105DF00017DAF40C2611A727277038A866256286C3
-:105E0000007104A35500441A75414822628415DBD1
-:105E10001502320BC922882117DAF10884140744CD
-:105E200001754905C834C020D10FD10F0809471D9D
-:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
-:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
-:105E50006684D10FC020D10F6C1004DB30C0D01885
-:105E6000DAD5DA2025300022300708580A28824C7B
-:105E7000DC200B80008931709E121BDACF0C4A1196
-:105E8000ABAA29A28409290B29A684D10F09C952DA
-:105E900068532600910418DACAC0A12F811600AAFF
-:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
-:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
-:105EC0009A0A0A472EF11600A10400881A08EE0269
-:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
-:105EE0000B2BC684D10F00006C1004DB30C0D0191E
-:105EF000DAB1DA2028300022300709880A28824CDB
-:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
-:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
-:105F200016DAA80C2711A626266038A87225228624
-:105F3000006104A35500441A7541082222840232EC
-:105F40000BD10F00C020D10F6C100415DB050249E6
-:105F5000142956112452120208430F8811C07300ED
-:105F6000810400361A008104C78F00771A0877036E
-:105F7000074401064402245612D10F006C10066E2D
-:105F800023026000AC6420A7C0A0851013DADD16E0
-:105F9000DAF4C040A6AA2BA2AE0B19416490666841
-:105FA000915D68925268933C2AA2AA283C7F288C73
-:105FB0007F0A0A4D2980012880002AACF208881146
-:105FC0000988027589462B3D0129B0002BB00108D4
-:105FD00099110B99027A9934B8332A2A00B1447284
-:105FE00049B160004A7FBF0715DADF63FFB90000DF
-:105FF000253AE863FFB10000253AE863FFA90000F5
-:10600000250A6463FFA1C05A63FF9C0000705F080B
-:106010002534FF058C142C34FE70AF0B0A8D142E22
-:106020003D012AE4012DE400DA405BFD5063FFA747
-:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
-:106040001BDACBC080C07160000D00000022A438B4
-:10605000B1AA299C107B915F26928679C2156E6247
-:1060600062C0206D080AB12200210400741A764B28
-:10607000DB63FFEE2292850D6311032514645FCF6D
-:10608000D650032D436DD9039820B4220644146DD5
-:106090004922982098219822982398249825982678
-:1060A000982798289829982A982B982C982D982EDC
-:1060B000982F222C4063FF971EDA4027E68027E6C0
-:1060C00081D10F00C02063FF830000006C1004C06A
-:1060D00062C04112DA3B1ADA3713DA522AA00023DF
-:1060E000322D19DA9F2BACFE2992AE6EA30260000E
-:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
-:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
-:10611000015805922B211A0ABB28D3A09B50580581
-:10612000A92B52000ABB082A0A005805A815DA91C3
-:106130002D21022C3AE80C3C2804DD022D25029C7E
-:10614000505805A08B50AABBC0A15805A01CDA8AE4
-:106150002D21020C3C2806DD0213DA882D25029C35
-:10616000305805988B30AABBC0A25805982A210246
-:10617000C0B40BAA020A0A4F2A25025805ACD10F57
-:10618000242423C3CC2C251A63FF760018DA801C44
-:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
-:1061A0001FDA7C2D203624F47A24F47E24F4820E27
-:1061B000DD0124F4862E0AF707552806DD02C07596
-:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
-:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
-:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
-:1061F0002E0A800D0D4627546C24546B0EDD022DA3
-:10620000243663FEFC0000006C10042A0A302B0ABE
-:10621000035BFF4D12DA53C390292616C3A1C0B306
-:10622000C08A2826175BFF48C03CC3B12B26161A2C
-:10623000D9E42AA02023261764A079C3A2C0B15BA9
-:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
-:10625000C0B12326175BFF3CC28F282616C0FE2F35
-:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
-:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
-:106280002926175BFF31C3C62C2616C1B32A0AA2E2
-:106290002B2617C0B35BFF2C290AA2292616C1851D
-:1062A000282617C2FB2F2616C0E72E26171DDA391F
-:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
-:1062C0006C10041CDA031BD9ED18DA3317DA341614
-:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
-:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
-:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
-:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
-:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
-:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
-:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
-:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
-:106350008A2E369F2C369E2C369DB1ACC07919D929
-:10636000D81BDA1413DA121ADA1218DA1314D9D97C
-:1063700016DA1304F42812DA1204660C040506A2D5
-:1063800052A858AA5AA3539B3029A50027848AC033
-:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
-:1063A0005726361D26361E2E361F16DA0813DA0833
-:1063B000A65504330C2826C82E75002D54AC2E5437
-:1063C000AB2E54AA2326E62326E52E26E7D10F007E
-:1063D0006C100613D99417D9E224723D2232937FB0
-:1063E0002F0B6D08052832937F8F0263FFF3C0C423
-:1063F000C0B01AD973C051D94004593929A4206EAC
-:1064000044020BB502C3281ED96EDDB025E4220577
-:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
-:1064200016D9E11DD9ED94102A724517D9AB6DA983
-:106430004BD450B3557A5B17DF50756B071FD9608B
-:106440008FF00F5F0C12D9A302F228AE2222D68160
-:10645000D54013D9A0746B0715D95A855005450C42
-:10646000035328B145A73FA832A93322369D2236CF
-:106470009E2436802B369F2BF48B2CF48C14D969F8
-:1064800024424DC030041414C84C6D0806B13304C6
-:106490001414C84263FFF20015D947C44000310408
-:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
-:1064B00018D95D2B824D29824E29A5202882537A36
-:1064C000871E2C54008E106FE45D12D93D2F2121C0
-:1064D0002321202F251F04330C23252023251ED103
-:1064E0000FC06218D99F88807E87D98910265400F2
-:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
-:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
-:106510002AB1200A1A1403AA0C2AB5202AB5212A66
-:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
-:10653000202BC51F03DD0C2DC5202DC51ED10F003E
-:106540006C100619D91F14D98612D93615D9A3C7CC
-:106550003FC0E02E56A82E56A92E56AA2E56AB2383
-:10656000262918D946DB101CD99DC0D42A42452DB6
-:1065700016012C160000B0890A880C98905BFF94D5
-:106580002C22E318D90F0C5C149C842B22E48C84FD
-:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
-:1065A0002A86062922CD0959142986072F22892FE8
-:1065B00086095BFF435BFF1423463BC1B01ED90035
-:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
-:1065D000E5025804965BFEBD5BFE96C050C0B01647
-:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
-:1065F0002DC0306000440000007F9F0FB155091985
-:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
-:106610005008580A28822C2B0A000B8000005104D5
-:10662000D2A0C091C7AF00991A0A99039912CE3827
-:1066300064206BD3202B20072516032C12022A621C
-:10664000827CA86318D8DC01110208580A28822C21
-:10665000DA500B8000D2A0643FD58A310A8A140434
-:10666000AA01C82A2B22010B8B1404BB017BA9456C
-:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
-:1066800019D8CD1AD91488130ADA28DC801DD951FB
-:1066900009880A28823C0DAA080B8000652F93D335
-:1066A00020C0B063FF9400007FAF34B155005004A8
-:1066B0000A091963FF42DAB07B7B081AD8C12AA203
-:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
-:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
-:1066E000645F60C050C0B0C7CE9C1263FF5500000D
-:1066F0006C100427221EC08008E4311BD8AF0002B2
-:10670000002AB28219D8AF003104C06100661A298C
-:1067100091020A6A022AB68209E43115D90C0C38B2
-:1067200011A8532832822432842A8CFC7841102903
-:1067300021022A368297A0096902292502D10F0079
-:106740002B21022C32850B6B022CCCFC2C36829731
-:10675000C02B2502D10F00006C1004C0E71DD89299
-:106760001CD8940D4911D7208B228A200B4B0BD2B9
-:10677000A007A80C9B72288CF4C8346F8E026000AE
-:10678000A31FD88AA298AF7B78B334C93DC081C01B
-:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
-:1067A0000500308800508C887008980878B16DD248
-:1067B000A09870D10FC0F0038F387FE0DE63FFD860
-:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
-:1067D0005002F5380505426450792CD67E0B3612EE
-:1067E0002F6C100F4F366DFA0500808800208C0644
-:1067F000440CC081C05003B208237C0C03853805CB
-:10680000054264505A2CD67ED30F6D4A050020886D
-:1068100000308CD2A0A798BC889870D10FD2A0BCB1
-:10682000799970D10FD2302BAD08C0F1C0500BF563
-:1068300038050542CB542CD67E083F14260A100F8B
-:10684000660C0646366D6A0500208800B08C8270A2
-:1068500063FF2D00C05003F53875E08063FF7A00B8
-:10686000C06002863876E09F63FF9900C05003F550
-:106870003875E0C463FFBE006C1004D62068520F68
-:10688000695324DA20DB30DC405800F7D2A0D10F66
-:10689000DA20DB30DC405800F49A2424240EC02196
-:1068A00022640FC020D10F00B83BB04C2A2C748951
-:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
-:1068C0002D240E2890072D9003A488B088B1DD2DCB
-:1068D00094032894075BFFA069511DC0E082242A1D
-:1068E000600F18D8BF2A240329600E8F202924079F
-:1068F00008FF029F209E64D10FC020D10F0000002E
-:106900006C1004942319D8B7C0B3083A110BAA022B
-:10691000992019D8299A2116D827C05028929D2548
-:1069200064A2288C1828969DD10F00006C100428B2
-:106930002066C038232406B788282466D10F0000BB
-:106940006C10060D3C111AD819D820035B0C862256
-:106950000D55118221AA8902320B928105630C9395
-:10696000820C550C792B54CB531CD8111DD80FC059
-:10697000F7A256C031C0A0043A380A0A42769343BF
-:10698000044302C9AB2CD67ED30F6DBA0500208814
-:1069900000308C8281A25272917D92818382C83EA6
-:1069A000D10FC071C06002763876F0DB63FFD5008E
-:1069B000C020BC89998199809282D10F222DF892B2
-:1069C0008163FFA219D7FA02860CA9669611D940F5
-:1069D000063612961006BB0C64A0442CD67E8A1094
-:1069E000D30F6DAA0500208800908CBC828311C053
-:1069F000E0A433240A01034E380E0E42CAEC2CD612
-:106A00007E6DBA0500208800308C821102520CA2E3
-:106A100082BC22928163FF83BC82928163FF7C00EF
-:106A2000C06002363876F0B563FFAF00C070024731
-:106A30003877F0CC63FFC6006C100414D7EBC1525A
-:106A4000A424CA3128221D73811C292102659016B5
-:106A50002A300075A912022A02033B022C3007C01B
-:106A6000D25801D5653FDCD10F2B300703BB0B0B90
-:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
-:106A80006C1004292006C0706E9741292102C08F26
-:106A90002A2014C0B62B240606AA022A24147980C0
-:106AA000022725022A221E2C221D7AC10EC8ABDA2B
-:106AB00020DB302C0A00033D025BF7F96450892D7E
-:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
-:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
-:106AE000D7B529A29EC08A798B712BE22668B004A3
-:106AF0008C207BC96629A29D1FD7B264905D9790B8
-:106B0000C0C31DD7C62B21049D9608BB110CBB0228
-:106B10009B919B971CD7C3C08527E4A22BA29D28DD
-:106B200024068DFA282102B0DD2BBC302BA69D9DBA
-:106B3000FA0C8802282502C8D2C020D10F8EF91283
-:106B4000D7B92E2689C020D10F283000688938DABD
-:106B500020DB30DC4058004463FF6300022A022B34
-:106B60000A065800D3220A00D10F655010293000C0
-:106B7000689924022A02033B02DC4058003BC020F3
-:106B8000D10FD270D10F00002A2C74033B02044CA9
-:106B9000025BFEF163FF2700DB30DC402A2C745BD4
-:106BA000FEEEC020D10F00006C1004C83F8926887B
-:106BB00029A399992609880C080848282525CC522C
-:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
-:106BD0006C1004D820D73082220D451105220C926A
-:106BE0008264207407420B13D771D420A3837323CC
-:106BF00002242DF8858074514CBC82C0906D08161B
-:106C000000408800708C773903D720C0918680744B
-:106C10003901D42074610263FFE2CA98C097C04171
-:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
-:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
-:106C400000208800308C9780D270D10FBC8FC0E0BC
-:106C50000F4E387E90E263FFD6BC8292819280C054
-:106C6000209282D10F0000006C1006C0D71CD74EB6
-:106C70001BD7500D4911D7202E221F28221D0E4E42
-:106C80000BD280078A0C2E761F2AAC80C8346FAED8
-:106C9000026000CB2F0A801AD754A29EAA7A7EA344
-:106CA0003FC93FC0E1C05002E538050542CA552B37
-:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
-:106CC000721DAE9E0EA50C645086D2802E761DC01D
-:106CD00091298403D10FC05003E53875D0D363FFE9
-:106CE000CD15D741027E0CA5EE643051C0A1250A16
-:106CF0000002A538033A020505426450922BC67E75
-:106D00000E35129510255C10054536D30F6D5A05CA
-:106D100000A08800208CC0A1A3E2C05023FA800309
-:106D2000730C03A538AF730505426450722BC67E01
-:106D3000851005450C6D5A0500208800308CD280E6
-:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
-:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
-:106D6000D2302E8D08C0F1C0500EF538050542CB4B
-:106D7000592BC67E0A3F14C1600F660C064636D3F7
-:106D80000F6D6A0500208800E08C22721D63FF03EE
-:106D9000C061C05003653875D80263FF6263FF5C51
-:106DA000C05002A53875D08763FF8100C06003F62C
-:106DB0003876D0BF63FFB9006C10042A2015292053
-:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
-:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
-:106DE0000A472A2415CAAF8B438942B0A8009104F0
-:106DF00000881AA8FF0FBB029B278F260FB80C78BC
-:106E00003B1AC020D10F0000292102C0A20A99021A
-:106E1000292502C021D10F008B2763FFDC2BD12055
-:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
-:106E30008B438C288F42B0AD00F10400DD1AADCC3D
-:106E40000CBB029B27DA20B7EB580019C021D10FE9
-:106E50009F2763FFEF0000006C100428203C643083
-:106E60004705306000073E01053EB156076539050C
-:106E70004928C77FA933030641076603B1660606A2
-:106E800041A6337E871E222125291AFC732B150269
-:106E9000380C09816000063E01023EB124064239E9
-:106EA00003220AD10FD230D10FC05163FFC00000BE
-:106EB0006C100427221EC08008E4311DD6BF0002DA
-:106EC000002CD2821BD6BF003104C06100661A2B91
-:106ED000B1020C6C022CD6820BE43119D7440C3A67
-:106EE00011AA932832829780253282243284B455A5
-:106EF00025368275410A292102096902292502D114
-:106F00000F2A21022B32830A6A022B36822A25029B
-:106F1000D10F00006C100418D6A80C2711087708B0
-:106F2000267286253C04765B1315D6A405220A2218
-:106F300022A3682002742904227285D10FC020D1B7
-:106F40000F0000006C100419D6A727221EC080096C
-:106F5000770208E4311DD6980002002CD2821BD69D
-:106F600098003104C06100661A2BB1020C6C022C2F
-:106F7000D6820BE43119D71D0C3A11AA932832821C
-:106F80009780253282243284B45525368275410B90
-:106F90002A21020A6A022A2502D10F002B21022C83
-:106FA00032830B6B022C36822B2502D10F0000009E
-:106FB0006C10041BD6810C2A11ABAA29A286B43806
-:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
-:106FD000290868B00274B90D299D0129901F6E928D
-:106FE0000822A285D10FC020D10FC892C020D10F96
-:106FF000DA205BEE88C020D10F0000006C10041472
-:10700000D66E28429E19D66B6F88026000BA29920C
-:10701000266890078A2009AA0C65A0AC2A429DC068
-:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
-:10703000C28609B90A7ED30260009A2992A3689099
-:10704000078D2009DD0C65D08C25C2856450862D06
-:107050002104C0306ED80D2C2066B8CC0C0C472C07
-:10706000246665C07B1CD6E218D66B1AD66219D688
-:10707000731DD667C0E49E519D508F209357935542
-:1070800099539A569A5408FF021AD6839F5288261B
-:107090009F5A9E599D58935E9C5D935C9A5B08082D
-:1070A00048058811985FC0D81FD64C0CB911A49917
-:1070B000289285AFBF23F4CF288C402896858E2652
-:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
-:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
-:1070E0000FDBD05BFE072324662B200C63FF7500AB
-:1070F000C72FD10FC72FD10F6C1004C85B292006F2
-:1071000068941C689607C020D10FC020D10FDA20E8
-:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
-:107120002E200C18D6250CEF11A8FF29F286C08856
-:10713000798B791AD6220AEA0A2AA2A368A0048BBC
-:10714000207AB96823F2856430621BD62C290A8024
-:107150002C20682820672D21040B881104DD1108DC
-:10716000DD020DCC02C0842D4A100DCC021DD624A8
-:1071700098319D308A2B99379C340BAA02C0C09C51
-:10718000359C369A322A2C74DB4028F285C0D328ED
-:107190008C2028F6852C25042D24061FD60FDD40D3
-:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
-:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
-:1071C00024160128ACF86583862B2122C0F22A21DF
-:1071D00024CC572AAC010A0A4F2A25247ABB026024
-:1071E000037F2C21020C0C4C65C3192E22158D3205
-:1071F000C0910EDD0C65D39088381ED5EF64836B8B
-:107200008C37C0B8C0960CB9399914B49A9A120D3B
-:10721000991199138F6718D5EAC9FB2880217F83BC
-:10722000168B142C22002A200C5BFF61D4A064A3CF
-:10723000B38F6760002800002B200C89120CBA1154
-:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
-:10725000A368D00488207D893024A28564436427F4
-:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
-:1072700070C1C42D211F5BFEF889268827DDA00977
-:10728000880C7A8B179A10600006C04063FFCC0010
-:1072900000DA208B105BFEC88D1065A267C0E09EEF
-:1072A000488C649C498B658A669B4A9A4B97458FAC
-:1072B000677F7302600120CD529D10DA20DB302CF5
-:1072C00012015BFE698D10C051D6A08FA7C0C08A85
-:1072D00068974D9A4C8869896A984E994F8E6A8A48
-:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
-:1072F0000B8E1477B701C0A1C091C08493159D1760
-:107300009516C0D025203CC030085801089338C0DD
-:1073100082083310085B010535400B9D3807DD10EE
-:107320000BAB100E19402A211F07991003DD020D27
-:10733000BB020553100933020A55112921250A2AD7
-:10734000140929140499110A99020933028A2B2974
-:1073500021040BAA021BD6270899110955020855CA
-:10736000020BAA029A408920881408991109880200
-:1073700019D5A61DD62109880298418B2A934695D6
-:107380004783150DBB0285168D179B448A65896658
-:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
-:1073A00088268E29AD87972607EE0C0E0E482E25CF
-:1073B000259B672B200C87131ED5800CB911AE9925
-:1073C000289285A78828968517D584C090A7BB29C1
-:1073D000B4CF871863FE3C008C60C0E0C091C0F061
-:1073E000C034C0B82A210428203C08AA110B8B0104
-:1073F000038301039F380B9B39C03208FF100388B9
-:1074000001089E380C881407EE100FEE0203880165
-:1074100008983905BF1029211F0ABB1107881008D9
-:10742000FF020BAA0218D57809291403AA022B21FE
-:107430002583200B2B1404BB110833110FBB020B47
-:1074400099028B148F2A0B33020833028B2B647042
-:10745000868868974D984C8769886A9341994697C2
-:107460004E984FC07077C701C0719A4718D5E30B8B
-:107470007C100CEC0208F802984418D5E00CBC0211
-:1074800008CC029C402A200C295CFEC0801FD54AF3
-:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
-:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
-:1074B0002524B1AA2A26156490DBC9D28F262E2254
-:1074C000090DFF082F26060FEE0C0E0E482E25255F
-:1074D0006550E4C020D10F00C07093419F4499468D
-:1074E0009A4777C70A1CD5362CC022C0810C873832
-:1074F0001CD5C40B781008E80208B8020C88029862
-:107500004063FF8000CC57DA20DB608C115BFDD636
-:10751000292102689806689403C020D10F2B221EEF
-:10752000C0A029221D2A25027B9901C0B064BFE8B2
-:1075300013D5212CB00728B000DA2003880A28824E
-:107540004CC0D10B8000DBA065AFE763FFCA000031
-:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
-:10756000D10FC16DC19D29252C60000429252CD681
-:10757000902624672F2468DA20DB308C11DD502E12
-:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
-:1075900063FFDD000000C8DF8C268B29ADCC9C2664
-:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
-:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
-:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
-:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
-:1075E000252463FF1FDA202B200C5BFE5663FF145B
-:1075F00012D5858220028257C82163FFFC12D581F3
-:1076000003E83004EE3005B13093209421952263D5
-:10761000FFFC000010D57D910092019302940311AC
-:10762000D554821001EA30A21101F031C04004E4C7
-:107630001600020011D5768210234A00032202921E
-:107640001011D540C021921004E4318403830282DA
-:1076500001810000D23001230000000010D56D919F
-:107660000092019302940311D543821001EA30A2E3
-:107670001101F131C04004E41600020011D564820A
-:107680001013D4E7032202921004E431840383022E
-:107690008201810000D330013300000010D55E91DB
-:1076A00000810165104981026510448103CF1F925A
-:1076B000019302940311D531821001EA30A2110125
-:1076C000F231C04004E41600020011D550821013BC
-:1076D000D4CF032202921004E43184038302820196
-:1076E000C010910391029101810000D43001430048
-:1076F00012D500C03028374028374428374828376B
-:107700004C233D017233ED03020063FFFC000000D7
-:1077100010D542910092019302940311D54082103A
-:10772000921011D4F28310032202921011D53D124F
-:10773000D5049210C04004E41600020011D5348232
-:107740001013D4EB032202921004E4318403830269
-:107750008201810000D53001530000006C10026EE0
-:10776000322FD620056F04043F04745B2A05440CB5
-:1077700000410400331A220A006D490D73630403AB
-:10778000660CB1220F2211031314736302222C0121
-:10779000D10FC83BD10F000073630CC021D10F0083
-:1077A0000000000044495630C020D10F6C10020088
-:1077B00040046B4C07032318020219D10F0203196E
-:1077C000C020D10F6C100202EA30D10F6C1002CC35
-:1077D0002503F03160000F006F220503F1316000D6
-:1077E000056F230503F231000200D10F6C1002CCAB
-:1077F0002502F030D10F00006F220402F130D10FCA
-:107800006F230402F230D10FC020D10F6C1002227E
-:107810000A20230A006D280E2837402837442837CD
-:107820004828374C233D01030200D10F6C1002029F
-:10783000E431D10F0A0000004368656C73696F2062
-:1078400046572044454255473D3020284275696CD3
-:1078500074204D6F6E204D61722020382031373AF0
-:1078600032383A3135205053542032303130206F85
-:107870006E20636C656F70617472612E61736963F1
-:1078800064657369676E6572732E636F6D3A2F68F6
-:107890006F6D652F66656C69782F772F66775F3718
-:1078A0002E392D6977617270292C205665727369A3
-:1078B0006F6E2054337878203030372E30612E3080
-:1078C00030202D20313030373061303010070A0041
-:0478D0000BDFE8756D
-:00000001FF
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68e..842d000 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -94,25 +94,21 @@
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
 	struct list_head *next;
-	struct dentry *p, *q;
+	struct dentry *q;
 
 	spin_lock(&sbi->lookup_lock);
+	spin_lock(&root->d_lock);
 
-	if (prev == NULL) {
-		spin_lock(&root->d_lock);
+	if (prev)
+		next = prev->d_u.d_child.next;
+	else {
 		prev = dget_dlock(root);
 		next = prev->d_subdirs.next;
-		p = prev;
-		goto start;
 	}
 
-	p = prev;
-	spin_lock(&p->d_lock);
-again:
-	next = p->d_u.d_child.next;
-start:
+cont:
 	if (next == &root->d_subdirs) {
-		spin_unlock(&p->d_lock);
+		spin_unlock(&root->d_lock);
 		spin_unlock(&sbi->lookup_lock);
 		dput(prev);
 		return NULL;
@@ -121,16 +117,15 @@
 	q = list_entry(next, struct dentry, d_u.d_child);
 
 	spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
-	/* Negative dentry - try next */
-	if (!simple_positive(q)) {
-		spin_unlock(&p->d_lock);
-		lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
-		p = q;
-		goto again;
+	/* Already gone or negative dentry (under construction) - try next */
+	if (q->d_count == 0 || !simple_positive(q)) {
+		spin_unlock(&q->d_lock);
+		next = q->d_u.d_child.next;
+		goto cont;
 	}
 	dget_dlock(q);
 	spin_unlock(&q->d_lock);
-	spin_unlock(&p->d_lock);
+	spin_unlock(&root->d_lock);
 	spin_unlock(&sbi->lookup_lock);
 
 	dput(prev);
@@ -404,11 +399,6 @@
 			DPRINTK("checking mountpoint %p %.*s",
 				dentry, (int)dentry->d_name.len, dentry->d_name.name);
 
-			/* Path walk currently on this dentry? */
-			ino_count = atomic_read(&ino->count) + 2;
-			if (dentry->d_count > ino_count)
-				goto next;
-
 			/* Can we umount this guy */
 			if (autofs4_mount_busy(mnt, dentry))
 				goto next;
diff --git a/fs/bio.c b/fs/bio.c
index 73922ab..5eaa70c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1312,7 +1312,7 @@
  * Note that this code is very hard to test under normal circumstances because
  * direct-io pins the pages with get_user_pages().  This makes
  * is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, pdflush) could clean the pages if they are mapped
+ * But other code (eg, flusher threads) could clean the pages if they are mapped
  * pagecache.
  *
  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 83baec2..6e8f416 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,8 @@
  * If this code finds it can't get good compression, it puts an
  * entry onto the work queue to write the uncompressed bytes.  This
  * makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that pdflush sent them down.
+ * are written in the same order that the flusher thread sent them
+ * down.
  */
 static noinline int compress_file_range(struct inode *inode,
 					struct page *locked_page,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bc2f6ff..7bb7556 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -664,10 +664,6 @@
 	struct dentry *dentry;
 	int error;
 
-	error = mnt_want_write(parent->mnt);
-	if (error)
-		return error;
-
 	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
 
 	dentry = lookup_one_len(name, parent->dentry, namelen);
@@ -703,7 +699,6 @@
 	dput(dentry);
 out_unlock:
 	mutex_unlock(&dir->i_mutex);
-	mnt_drop_write(parent->mnt);
 	return error;
 }
 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 643335a..051c7fe 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@
 	/*
 	 * pages in the range can be dirty, clean or writeback.  We
 	 * start IO on any dirty ones so the wait doesn't stall waiting
-	 * for pdflush to find them
+	 * for the flusher thread to find them
 	 */
 	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 		filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8c6e61d..f2eb24c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -100,10 +100,6 @@
 	fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
 }
 
-/* NOTE:
- *	We move write_super stuff at umount in order to avoid deadlock
- *	for umount hold all lock.
- */
 static void save_error_info(struct btrfs_fs_info *fs_info)
 {
 	__save_error_info(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b8708f9..e86ae04 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1744,10 +1744,6 @@
 
 	device->fs_devices = root->fs_info->fs_devices;
 
-	/*
-	 * we don't want write_supers to jump in here with our device
-	 * half setup
-	 */
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
 	list_add(&device->dev_alloc_list,
diff --git a/fs/compat.c b/fs/compat.c
index 6161255..1bdb350 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1155,11 +1155,14 @@
 	struct file *file;
 	int fput_needed;
 	ssize_t ret;
+	loff_t pos;
 
 	file = fget_light(fd, &fput_needed);
 	if (!file)
 		return -EBADF;
-	ret = compat_readv(file, vec, vlen, &file->f_pos);
+	pos = file->f_pos;
+	ret = compat_readv(file, vec, vlen, &pos);
+	file->f_pos = pos;
 	fput_light(file, fput_needed);
 	return ret;
 }
@@ -1221,11 +1224,14 @@
 	struct file *file;
 	int fput_needed;
 	ssize_t ret;
+	loff_t pos;
 
 	file = fget_light(fd, &fput_needed);
 	if (!file)
 		return -EBADF;
-	ret = compat_writev(file, vec, vlen, &file->f_pos);
+	pos = file->f_pos;
+	ret = compat_writev(file, vec, vlen, &pos);
+	file->f_pos = pos;
 	fput_light(file, fput_needed);
 	return ret;
 }
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5badb0c..1562c27 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,15 +37,12 @@
 
 #define EXOFS_DBGMSG2(M...) do {} while (0)
 
-enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
-
 unsigned exofs_max_io_pages(struct ore_layout *layout,
 			    unsigned expected_pages)
 {
-	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+	unsigned pages = min_t(unsigned, expected_pages,
+			       layout->max_io_length / PAGE_SIZE);
 
-	/* TODO: easily support bio chaining */
-	pages =  min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
 	return pages;
 }
 
@@ -101,7 +98,8 @@
 	 * it might not end here. don't be left with nothing
 	 */
 	if (!pcol->expected_pages)
-		pcol->expected_pages = MAX_PAGES_KMALLOC;
+		pcol->expected_pages =
+				exofs_max_io_pages(&pcol->sbi->layout, ~0);
 }
 
 static int pcol_try_alloc(struct page_collect *pcol)
@@ -389,6 +387,8 @@
 	size_t len;
 	int ret;
 
+	BUG_ON(!PageLocked(page));
+
 	/* FIXME: Just for debugging, will be removed */
 	if (PageUptodate(page))
 		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,8 +572,16 @@
 
 	if (!pcol->that_locked_page ||
 	    (pcol->that_locked_page->index != index)) {
-		struct page *page = find_get_page(pcol->inode->i_mapping, index);
+		struct page *page;
+		loff_t i_size = i_size_read(pcol->inode);
 
+		if (offset >= i_size) {
+			*uptodate = true;
+			EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
+			return ZERO_PAGE(0);
+		}
+
+		page =  find_get_page(pcol->inode->i_mapping, index);
 		if (!page) {
 			page = find_or_create_page(pcol->inode->i_mapping,
 						   index, GFP_NOFS);
@@ -602,12 +610,13 @@
 {
 	struct page_collect *pcol = priv;
 
-	if (pcol->that_locked_page != page) {
+	if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
 		EXOFS_DBGMSG("index=0x%lx\n", page->index);
 		page_cache_release(page);
 		return;
 	}
-	EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
+	EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
+		     ZERO_PAGE(0) == page ? -1 : page->index);
 }
 
 static const struct _ore_r4w_op _r4w_op = {
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d4..1585db1 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@
 				bio->bi_rw |= REQ_WRITE;
 			}
 
-			osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
-				      bio, per_dev->length);
+			osd_req_write(or, _ios_obj(ios, cur_comp),
+				      per_dev->offset, bio, per_dev->length);
 			ORE_DBGMSG("write(0x%llx) offset=0x%llx "
 				      "length=0x%llx dev=%d\n",
-				     _LLU(_ios_obj(ios, dev)->id),
+				     _LLU(_ios_obj(ios, cur_comp)->id),
 				     _LLU(per_dev->offset),
 				     _LLU(per_dev->length), dev);
 		} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@
 			       (ios->si.unit_off + ios->length >
 				ios->layout->stripe_unit));
 
-			ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+			ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
 						 per_dev->offset,
 						 ios->kern_buff, ios->length);
 			if (unlikely(ret))
 				goto out;
 			ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
 				      "length=0x%llx dev=%d\n",
-				     _LLU(_ios_obj(ios, dev)->id),
+				     _LLU(_ios_obj(ios, cur_comp)->id),
 				     _LLU(per_dev->offset),
 				     _LLU(ios->length), per_dev->dev);
 		} else {
-			osd_req_set_attributes(or, _ios_obj(ios, dev));
+			osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
 			ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
-				     _LLU(_ios_obj(ios, dev)->id),
+				     _LLU(_ios_obj(ios, cur_comp)->id),
 				     ios->out_attr_len, dev);
 		}
 
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 4337836..dde41a7 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -400,8 +400,6 @@
 	ret = ore_write(ios);
 	if (unlikely(ret))
 		EXOFS_ERR("%s: ore_write failed.\n", __func__);
-	else
-		sb->s_dirt = 0;
 
 
 	unlock_super(sb);
@@ -412,14 +410,6 @@
 	return ret;
 }
 
-static void exofs_write_super(struct super_block *sb)
-{
-	if (!(sb->s_flags & MS_RDONLY))
-		exofs_sync_fs(sb, 1);
-	else
-		sb->s_dirt = 0;
-}
-
 static void _exofs_print_device(const char *msg, const char *dev_path,
 				struct osd_dev *od, u64 pid)
 {
@@ -952,7 +942,6 @@
 	.write_inode    = exofs_write_inode,
 	.evict_inode    = exofs_evict_inode,
 	.put_super      = exofs_put_super,
-	.write_super    = exofs_write_super,
 	.sync_fs	= exofs_sync_fs,
 	.statfs         = exofs_statfs,
 };
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9a4a5c4..a075973 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3459,14 +3459,6 @@
  * inode out, but prune_icache isn't a user-visible syncing function.
  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
  * we start and wait on commits.
- *
- * Is this efficient/effective?  Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O.  But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out.  One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory.  It has the desired
- * effect.
  */
 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
 {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ff9bcdc..8c892e9 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -64,11 +64,6 @@
 
 /*
  * Wrappers for journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
  */
 handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
 {
@@ -90,12 +85,6 @@
 	return journal_start(journal, nblocks);
 }
 
-/*
- * The only special thing we need to do here is to make sure that all
- * journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
 int __ext3_journal_stop(const char *where, handle_t *handle)
 {
 	struct super_block *sb;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d23b31c..1b50890 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -280,14 +280,18 @@
 	return desc;
 }
 
-static int ext4_valid_block_bitmap(struct super_block *sb,
-				   struct ext4_group_desc *desc,
-				   unsigned int block_group,
-				   struct buffer_head *bh)
+/*
+ * Return the block number which was discovered to be invalid, or 0 if
+ * the block bitmap is valid.
+ */
+static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+					    struct ext4_group_desc *desc,
+					    unsigned int block_group,
+					    struct buffer_head *bh)
 {
 	ext4_grpblk_t offset;
 	ext4_grpblk_t next_zero_bit;
-	ext4_fsblk_t bitmap_blk;
+	ext4_fsblk_t blk;
 	ext4_fsblk_t group_first_block;
 
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
@@ -297,37 +301,33 @@
 		 * or it has to also read the block group where the bitmaps
 		 * are located to verify they are set.
 		 */
-		return 1;
+		return 0;
 	}
 	group_first_block = ext4_group_first_block_no(sb, block_group);
 
 	/* check whether block bitmap block number is set */
-	bitmap_blk = ext4_block_bitmap(sb, desc);
-	offset = bitmap_blk - group_first_block;
+	blk = ext4_block_bitmap(sb, desc);
+	offset = blk - group_first_block;
 	if (!ext4_test_bit(offset, bh->b_data))
 		/* bad block bitmap */
-		goto err_out;
+		return blk;
 
 	/* check whether the inode bitmap block number is set */
-	bitmap_blk = ext4_inode_bitmap(sb, desc);
-	offset = bitmap_blk - group_first_block;
+	blk = ext4_inode_bitmap(sb, desc);
+	offset = blk - group_first_block;
 	if (!ext4_test_bit(offset, bh->b_data))
 		/* bad block bitmap */
-		goto err_out;
+		return blk;
 
 	/* check whether the inode table block number is set */
-	bitmap_blk = ext4_inode_table(sb, desc);
-	offset = bitmap_blk - group_first_block;
+	blk = ext4_inode_table(sb, desc);
+	offset = blk - group_first_block;
 	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
 				offset + EXT4_SB(sb)->s_itb_per_group,
 				offset);
-	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
-		/* good bitmap for inode tables */
-		return 1;
-
-err_out:
-	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
-			block_group, bitmap_blk);
+	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
+		/* bad bitmap for inode tables */
+		return blk;
 	return 0;
 }
 
@@ -336,14 +336,26 @@
 			       unsigned int block_group,
 			       struct buffer_head *bh)
 {
+	ext4_fsblk_t	blk;
+
 	if (buffer_verified(bh))
 		return;
 
 	ext4_lock_group(sb, block_group);
-	if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
-	    ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
-					  EXT4_BLOCKS_PER_GROUP(sb) / 8))
-		set_buffer_verified(bh);
+	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+	if (unlikely(blk != 0)) {
+		ext4_unlock_group(sb, block_group);
+		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+			   block_group, blk);
+		return;
+	}
+	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+			desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+		ext4_unlock_group(sb, block_group);
+		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+		return;
+	}
+	set_buffer_verified(bh);
 	ext4_unlock_group(sb, block_group);
 }
 
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index f8716ea..5c2d181 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -79,7 +79,6 @@
 	if (provided == calculated)
 		return 1;
 
-	ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index cd0c7ed..aabbb3f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2662,6 +2662,7 @@
 		}
 		path[0].p_depth = depth;
 		path[0].p_hdr = ext_inode_hdr(inode);
+		i = 0;
 
 		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
 			err = -EIO;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6324f74e..dff171c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1970,7 +1970,7 @@
  * This function can get called via...
  *   - ext4_da_writepages after taking page lock (have journal handle)
  *   - journal_submit_inode_data_buffers (no journal handle)
- *   - shrink_page_list via pdflush (no journal handle)
+ *   - shrink_page_list via kswapd/direct reclaim (no journal handle)
  *   - grab_page_cache when doing write_begin (have journal handle)
  *
  * We don't do any block allocation in this function. If we have page with
@@ -4589,14 +4589,6 @@
  * inode out, but prune_icache isn't a user-visible syncing function.
  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
  * we start and wait on commits.
- *
- * Is this efficient/effective?  Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O.  But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out.  One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory.  It has the desired
- * effect.
  */
 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d76ec82..c6e0cb3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -326,11 +326,6 @@
 
 /*
  * Wrappers for jbd2_journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
  */
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 {
@@ -356,12 +351,6 @@
 	return jbd2_journal_start(journal, nblocks);
 }
 
-/*
- * The only special thing we need to do here is to make sure that all
- * jbd2_journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
 {
 	struct super_block *sb;
@@ -959,6 +948,7 @@
 	ei->i_reserved_meta_blocks = 0;
 	ei->i_allocated_meta_blocks = 0;
 	ei->i_da_metadata_calc_len = 0;
+	ei->i_da_metadata_calc_last_lblock = 0;
 	spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
@@ -3119,6 +3109,10 @@
 	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
 	int			s, j, count = 0;
 
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+			sbi->s_itb_per_group + 2);
+
 	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
 		(grp * EXT4_BLOCKS_PER_GROUP(sb));
 	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
@@ -4430,6 +4424,7 @@
 		ext4_commit_super(sb, 1);
 
 		jbd2_journal_clear_err(journal);
+		jbd2_journal_update_sb_errno(journal);
 	}
 }
 
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8964cf3..324bc08 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -383,6 +383,9 @@
 	struct fuse_entry_out outentry;
 	struct fuse_file *ff;
 
+	/* Userspace expects S_IFREG in create mode */
+	BUG_ON((mode & S_IFMT) != S_IFREG);
+
 	forget = fuse_alloc_forget();
 	err = -ENOMEM;
 	if (!forget)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 93d8d6c..aba15f1 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -703,13 +703,16 @@
 				  unsigned long nr_segs, loff_t pos)
 {
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
+	struct fuse_conn *fc = get_fuse_conn(inode);
 
-	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
+	/*
+	 * In auto invalidate mode, always update attributes on read.
+	 * Otherwise, only update if we attempt to read past EOF (to ensure
+	 * i_size is up to date).
+	 */
+	if (fc->auto_inval_data ||
+	    (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
 		int err;
-		/*
-		 * If trying to read past EOF, make sure the i_size
-		 * attribute is up-to-date.
-		 */
 		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
 		if (err)
 			return err;
@@ -1700,7 +1703,7 @@
 	size_t n;
 	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
 
-	for (n = 0; n < count; n++) {
+	for (n = 0; n < count; n++, iov++) {
 		if (iov->iov_len > (size_t) max)
 			return -ENOMEM;
 		max -= iov->iov_len;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 771fb63..e24dd74 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -484,6 +484,9 @@
 	/** Is fallocate not implemented by fs? */
 	unsigned no_fallocate:1;
 
+	/** Use enhanced/automatic page cache invalidation. */
+	unsigned auto_inval_data:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1cd6165..ce0a283 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -197,6 +197,7 @@
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	loff_t oldsize;
+	struct timespec old_mtime;
 
 	spin_lock(&fc->lock);
 	if (attr_version != 0 && fi->attr_version > attr_version) {
@@ -204,15 +205,35 @@
 		return;
 	}
 
+	old_mtime = inode->i_mtime;
 	fuse_change_attributes_common(inode, attr, attr_valid);
 
 	oldsize = inode->i_size;
 	i_size_write(inode, attr->size);
 	spin_unlock(&fc->lock);
 
-	if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
-		truncate_pagecache(inode, oldsize, attr->size);
-		invalidate_inode_pages2(inode->i_mapping);
+	if (S_ISREG(inode->i_mode)) {
+		bool inval = false;
+
+		if (oldsize != attr->size) {
+			truncate_pagecache(inode, oldsize, attr->size);
+			inval = true;
+		} else if (fc->auto_inval_data) {
+			struct timespec new_mtime = {
+				.tv_sec = attr->mtime,
+				.tv_nsec = attr->mtimensec,
+			};
+
+			/*
+			 * Auto inval mode also checks and invalidates if mtime
+			 * has changed.
+			 */
+			if (!timespec_equal(&old_mtime, &new_mtime))
+				inval = true;
+		}
+
+		if (inval)
+			invalidate_inode_pages2(inode->i_mapping);
 	}
 }
 
@@ -834,6 +855,8 @@
 				fc->big_writes = 1;
 			if (arg->flags & FUSE_DONT_MASK)
 				fc->dont_mask = 1;
+			if (arg->flags & FUSE_AUTO_INVAL_DATA)
+				fc->auto_inval_data = 1;
 		} else {
 			ra_pages = fc->max_read / PAGE_CACHE_SIZE;
 			fc->no_lock = 1;
@@ -859,7 +882,8 @@
 	arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
-		FUSE_FLOCK_LOCKS;
+		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA;
 	req->in.h.opcode = FUSE_INIT;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(*arg);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3a56c8d..22255d9 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@
 		/*
 		 * If it's a fully non-blocking write attempt and we cannot
 		 * lock the buffer then redirty the page.  Note that this can
-		 * potentially cause a busy-wait loop from pdflush and kswapd
+		 * potentially cause a busy-wait loop from the flusher thread and kswapd
 		 * activity, but those code paths have their own higher-level
 		 * throttling.
 		 */
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5fd51a5..b7ec224 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -236,10 +236,10 @@
  * hfs_mdb_commit()
  *
  * Description:
- *   This updates the MDB on disk (look also at hfs_write_super()).
+ *   This updates the MDB on disk.
  *   It does not check, if the superblock has been modified, or
  *   if the filesystem has been mounted read-only. It is mainly
- *   called by hfs_write_super() and hfs_btree_extend().
+ *   called by hfs_sync_fs() and flush_mdb().
  * Input Variable(s):
  *   struct hfs_mdb *mdb: Pointer to the hfs MDB
  *   int backup;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 425c2f2..0935750 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,8 +534,8 @@
 		ret = 1;
 	} else if (journal->j_committing_transaction) {
 		/*
-		 * If ext3_write_super() recently started a commit, then we
-		 * have to wait for completion of that transaction
+		 * If commit has been started, then we have to wait for
+		 * completion of that transaction.
 		 */
 		if (ptid)
 			*ptid = journal->j_committing_transaction->t_tid;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c..e149b99 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -612,8 +612,8 @@
 		ret = 1;
 	} else if (journal->j_committing_transaction) {
 		/*
-		 * If ext3_write_super() recently started a commit, then we
-		 * have to wait for completion of that transaction
+		 * If commit has been started, then we have to wait for
+		 * completion of that transaction.
 		 */
 		if (ptid)
 			*ptid = journal->j_committing_transaction->t_tid;
@@ -1377,7 +1377,7 @@
  * Update a journal's errno.  Write updated superblock to disk waiting for IO
  * to complete.
  */
-static void jbd2_journal_update_sb_errno(journal_t *journal)
+void jbd2_journal_update_sb_errno(journal_t *journal)
 {
 	journal_superblock_t *sb = journal->j_superblock;
 
@@ -1390,6 +1390,7 @@
 
 	jbd2_write_superblock(journal, WRITE_SYNC);
 }
+EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
 /*
  * Read the superblock for a given journal, performing initial
diff --git a/fs/namei.c b/fs/namei.c
index 05480a6..51e9aa6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2414,7 +2414,7 @@
 		goto out;
 	}
 
-	mode = op->mode & S_IALLUGO;
+	mode = op->mode;
 	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
 		mode &= ~current_umask();
 
@@ -2452,7 +2452,7 @@
 	}
 
 	if (open_flag & O_CREAT) {
-		error = may_o_create(&nd->path, dentry, op->mode);
+		error = may_o_create(&nd->path, dentry, mode);
 		if (error) {
 			create_error = error;
 			if (open_flag & O_EXCL)
@@ -2489,6 +2489,10 @@
 			dput(dentry);
 			dentry = file->f_path.dentry;
 		}
+		if (create_error && dentry->d_inode == NULL) {
+			error = create_error;
+			goto out;
+		}
 		goto looked_up;
 	}
 
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 6522cac..6a10812 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -676,17 +676,13 @@
 	.alloc_inode    = nilfs_alloc_inode,
 	.destroy_inode  = nilfs_destroy_inode,
 	.dirty_inode    = nilfs_dirty_inode,
-	/* .write_inode    = nilfs_write_inode, */
-	/* .drop_inode	  = nilfs_drop_inode, */
 	.evict_inode    = nilfs_evict_inode,
 	.put_super      = nilfs_put_super,
-	/* .write_super    = nilfs_write_super, */
 	.sync_fs        = nilfs_sync_fs,
 	.freeze_fs	= nilfs_freeze,
 	.unfreeze_fs	= nilfs_unfreeze,
 	.statfs         = nilfs_statfs,
 	.remount_fs     = nilfs_remount,
-	/* .umount_begin */
 	.show_options = nilfs_show_options
 };
 
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 6eee417..be1267a 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -107,8 +107,6 @@
 	 * used for
 	 * - loading the latest checkpoint exclusively.
 	 * - allocating a new full segment.
-	 * - protecting s_dirt in the super_block struct
-	 *   (see nilfs_write_super) and the following fields.
 	 */
 	struct buffer_head     *ns_sbh[2];
 	struct nilfs_super_block *ns_sbp[2];
diff --git a/fs/open.c b/fs/open.c
index f3d96e7..e1f2cdb 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -717,7 +717,7 @@
 			 * here, so just reset the state.
 			 */
 			file_reset_write(f);
-			mnt_drop_write(f->f_path.mnt);
+			__mnt_drop_write(f->f_path.mnt);
 		}
 	}
 cleanup_file:
@@ -852,9 +852,10 @@
 	int lookup_flags = 0;
 	int acc_mode;
 
-	if (!(flags & O_CREAT))
-		mode = 0;
-	op->mode = mode;
+	if (flags & O_CREAT)
+		op->mode = (mode & S_IALLUGO) | S_IFREG;
+	else
+		op->mode = 0;
 
 	/* Must never be set by userspace */
 	flags &= ~FMODE_NONOTIFY;
diff --git a/fs/super.c b/fs/super.c
index b05cf47..0902cfa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -537,46 +537,6 @@
 EXPORT_SYMBOL(drop_super);
 
 /**
- * sync_supers - helper for periodic superblock writeback
- *
- * Call the write_super method if present on all dirty superblocks in
- * the system.  This is for the periodic writeback used by most older
- * filesystems.  For data integrity superblock writeback use
- * sync_filesystems() instead.
- *
- * Note: check the dirty flag before waiting, so we don't
- * hold up the sync while mounting a device. (The newly
- * mounted device won't need syncing.)
- */
-void sync_supers(void)
-{
-	struct super_block *sb, *p = NULL;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (hlist_unhashed(&sb->s_instances))
-			continue;
-		if (sb->s_op->write_super && sb->s_dirt) {
-			sb->s_count++;
-			spin_unlock(&sb_lock);
-
-			down_read(&sb->s_umount);
-			if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
-				sb->s_op->write_super(sb);
-			up_read(&sb->s_umount);
-
-			spin_lock(&sb_lock);
-			if (p)
-				__put_super(p);
-			p = sb;
-		}
-	}
-	if (p)
-		__put_super(p);
-	spin_unlock(&sb_lock);
-}
-
-/**
  *	iterate_supers - call function for all active superblocks
  *	@f: function to call
  *	@arg: argument to pass to it
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 35389ca..7bd6e72 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -37,11 +37,11 @@
  *
  * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
  * implement. However, this is not true for 'ubifs_writepage()', which may be
- * called with @i_mutex unlocked. For example, when pdflush is doing background
- * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
- * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
- * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
- * we are only guaranteed that the page is locked.
+ * called with @i_mutex unlocked. For example, when the flusher thread is doing
+ * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
+ * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
+ * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
+ * 'ubifs_writepage()' we are only guaranteed that the page is locked.
  *
  * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
  * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1c766c3..c3fa6c5 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -303,7 +303,7 @@
 	mutex_lock(&ui->ui_mutex);
 	/*
 	 * Due to races between write-back forced by budgeting
-	 * (see 'sync_some_inodes()') and pdflush write-back, the inode may
+	 * (see 'sync_some_inodes()') and background write-back, the inode may
 	 * have already been synchronized, do not do this again. This might
 	 * also happen if it was synchronized in an VFS operation, e.g.
 	 * 'ubifs_link()'.
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 2c744c7..26a92fc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -491,11 +491,11 @@
 
 acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
 
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
 
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
 
 acpi_status acpi_leave_sleep_state(u8 sleep_state);
 
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 3af87de..3d00bd5 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -803,7 +803,7 @@
 
 /* Sleep function dispatch */
 
-typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state);
 
 struct acpi_sleep_functions {
 	ACPI_SLEEP_FUNCTION legacy_function;
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d3..c04e0db 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
-		fail_fn(count);
+		/*
+		 * We failed to acquire the lock, so mark it contended
+		 * to ensure that any waiting tasks are woken up by the
+		 * unlock slow path.
+		 */
+		if (likely(atomic_xchg(count, -1) != 1))
+			fail_fn(count);
 }
 
 /**
@@ -43,7 +49,8 @@
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
-		return fail_fn(count);
+		if (likely(atomic_xchg(count, -1) != 1))
+			return fail_fn(count);
 	return 0;
 }
 
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 7ff5c99..c78bb99 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -213,9 +213,12 @@
 	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 5805686..dc3a8cd 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -964,6 +964,8 @@
 #define RADEON_INFO_IB_VM_MAX_SIZE	0x0f
 /* max pipes - needed for compute shaders */
 #define RADEON_INFO_MAX_PIPES		0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP		0x11
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index d9a7544..1f2c1c7 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -195,6 +195,7 @@
 header-y += sock_diag.h
 header-y += inet_diag.h
 header-y += unix_diag.h
+header-y += packet_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
@@ -391,6 +392,7 @@
 header-y += v4l2-mediabus.h
 header-y += v4l2-subdev.h
 header-y += veth.h
+header-y += vfio.h
 header-y += vhost.h
 header-y += videodev2.h
 header-y += virtio_9p.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3ad510b..4f2a762 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -96,7 +96,7 @@
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
 void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
-void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
 void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c97c6b9..2a9a9ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -124,7 +124,6 @@
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_arm_supers_timer(void);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 3c80885..6ba45d2 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -89,11 +89,18 @@
 #define  BCMA_CC_CHIPST_4313_OTP_PRESENT	2
 #define  BCMA_CC_CHIPST_4331_SPROM_PRESENT	2
 #define  BCMA_CC_CHIPST_4331_OTP_PRESENT	4
+#define  BCMA_CC_CHIPST_43228_ILP_DIV_EN	0x00000001
+#define  BCMA_CC_CHIPST_43228_OTP_PRESENT	0x00000002
+#define  BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL	0x00000004
+#define  BCMA_CC_CHIPST_43228_SDIO_MODE		0x00000008
+#define  BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT	0x00000010
+#define  BCMA_CC_CHIPST_43228_SDIO_RESET	0x00000020
 #define  BCMA_CC_CHIPST_4706_PKG_OPTION		BIT(0) /* 0: full-featured package 1: low-cost package */
 #define  BCMA_CC_CHIPST_4706_SFLASH_PRESENT	BIT(1) /* 0: parallel, 1: serial flash is present */
 #define  BCMA_CC_CHIPST_4706_SFLASH_TYPE	BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
 #define  BCMA_CC_CHIPST_4706_MIPS_BENDIAN	BIT(3) /* 0: little, 1: big endian */
 #define  BCMA_CC_CHIPST_4706_PCIE1_DISABLE	BIT(5) /* PCIE1 enable strap pin */
+#define  BCMA_CC_CHIPST_5357_NAND_BOOT		BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
 #define BCMA_CC_JCMD			0x0030		/* Rev >= 10 only */
 #define  BCMA_CC_JCMD_START		0x80000000
 #define  BCMA_CC_JCMD_BUSY		0x80000000
@@ -260,6 +267,29 @@
 #define  BCMA_CC_SROM_CONTROL_SIZE_16K	0x00000004
 #define  BCMA_CC_SROM_CONTROL_SIZE_SHIFT	1
 #define  BCMA_CC_SROM_CONTROL_PRESENT	0x00000001
+/* Block 0x140 - 0x190 registers are chipset specific */
+#define BCMA_CC_4706_FLASHSCFG		0x18C		/* Flash struct configuration */
+#define  BCMA_CC_4706_FLASHSCFG_MASK	0x000000ff
+#define  BCMA_CC_4706_FLASHSCFG_SF1	0x00000001	/* 2nd serial flash present */
+#define  BCMA_CC_4706_FLASHSCFG_PF1	0x00000002	/* 2nd parallel flash present */
+#define  BCMA_CC_4706_FLASHSCFG_SF1_TYPE	0x00000004	/* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define  BCMA_CC_4706_FLASHSCFG_NF1	0x00000008	/* 2nd NAND flash present */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK	0x000000f0
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB	0x00000010	/* 4MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB	0x00000020	/* 8MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB	0x00000030	/* 16MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB	0x00000040	/* 32MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB	0x00000050	/* 64MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB	0x00000060	/* 128MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB	0x00000070	/* 256MB */
+/* NAND flash registers for BCM4706 (corerev = 31) */
+#define BCMA_CC_NFLASH_CTL		0x01A0
+#define  BCMA_CC_NFLASH_CTL_ERR		0x08000000
+#define BCMA_CC_NFLASH_CONF		0x01A4
+#define BCMA_CC_NFLASH_COL_ADDR		0x01A8
+#define BCMA_CC_NFLASH_ROW_ADDR		0x01AC
+#define BCMA_CC_NFLASH_DATA		0x01B0
+#define BCMA_CC_NFLASH_WAITCNT0		0x01B4
 /* 0x1E0 is defined as shared BCMA_CLKCTLST */
 #define BCMA_CC_HW_WORKAROUND		0x01E4 /* Hardware workaround (rev >= 20) */
 #define BCMA_CC_UART0_DATA		0x0300
@@ -319,6 +349,60 @@
 #define BCMA_CC_PLLCTL_ADDR		0x0660
 #define BCMA_CC_PLLCTL_DATA		0x0664
 #define BCMA_CC_SPROM			0x0800 /* SPROM beginning */
+/* NAND flash MLC controller registers (corerev >= 38) */
+#define BCMA_CC_NAND_REVISION		0x0C00
+#define BCMA_CC_NAND_CMD_START		0x0C04
+#define BCMA_CC_NAND_CMD_ADDR_X		0x0C08
+#define BCMA_CC_NAND_CMD_ADDR		0x0C0C
+#define BCMA_CC_NAND_CMD_END_ADDR	0x0C10
+#define BCMA_CC_NAND_CS_NAND_SELECT	0x0C14
+#define BCMA_CC_NAND_CS_NAND_XOR	0x0C18
+#define BCMA_CC_NAND_SPARE_RD0		0x0C20
+#define BCMA_CC_NAND_SPARE_RD4		0x0C24
+#define BCMA_CC_NAND_SPARE_RD8		0x0C28
+#define BCMA_CC_NAND_SPARE_RD12		0x0C2C
+#define BCMA_CC_NAND_SPARE_WR0		0x0C30
+#define BCMA_CC_NAND_SPARE_WR4		0x0C34
+#define BCMA_CC_NAND_SPARE_WR8		0x0C38
+#define BCMA_CC_NAND_SPARE_WR12		0x0C3C
+#define BCMA_CC_NAND_ACC_CONTROL	0x0C40
+#define BCMA_CC_NAND_CONFIG		0x0C48
+#define BCMA_CC_NAND_TIMING_1		0x0C50
+#define BCMA_CC_NAND_TIMING_2		0x0C54
+#define BCMA_CC_NAND_SEMAPHORE		0x0C58
+#define BCMA_CC_NAND_DEVID		0x0C60
+#define BCMA_CC_NAND_DEVID_X		0x0C64
+#define BCMA_CC_NAND_BLOCK_LOCK_STATUS	0x0C68
+#define BCMA_CC_NAND_INTFC_STATUS	0x0C6C
+#define BCMA_CC_NAND_ECC_CORR_ADDR_X	0x0C70
+#define BCMA_CC_NAND_ECC_CORR_ADDR	0x0C74
+#define BCMA_CC_NAND_ECC_UNC_ADDR_X	0x0C78
+#define BCMA_CC_NAND_ECC_UNC_ADDR	0x0C7C
+#define BCMA_CC_NAND_READ_ERROR_COUNT	0x0C80
+#define BCMA_CC_NAND_CORR_STAT_THRESHOLD	0x0C84
+#define BCMA_CC_NAND_READ_ADDR_X	0x0C90
+#define BCMA_CC_NAND_READ_ADDR		0x0C94
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X	0x0C98
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR	0x0C9C
+#define BCMA_CC_NAND_COPY_BACK_ADDR_X	0x0CA0
+#define BCMA_CC_NAND_COPY_BACK_ADDR	0x0CA4
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X	0x0CA8
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR	0x0CAC
+#define BCMA_CC_NAND_INV_READ_ADDR_X	0x0CB0
+#define BCMA_CC_NAND_INV_READ_ADDR	0x0CB4
+#define BCMA_CC_NAND_BLK_WR_PROTECT	0x0CC0
+#define BCMA_CC_NAND_ACC_CONTROL_CS1	0x0CD0
+#define BCMA_CC_NAND_CONFIG_CS1		0x0CD4
+#define BCMA_CC_NAND_TIMING_1_CS1	0x0CD8
+#define BCMA_CC_NAND_TIMING_2_CS1	0x0CDC
+#define BCMA_CC_NAND_SPARE_RD16		0x0D30
+#define BCMA_CC_NAND_SPARE_RD20		0x0D34
+#define BCMA_CC_NAND_SPARE_RD24		0x0D38
+#define BCMA_CC_NAND_SPARE_RD28		0x0D3C
+#define BCMA_CC_NAND_CACHE_ADDR		0x0D40
+#define BCMA_CC_NAND_CACHE_DATA		0x0D44
+#define BCMA_CC_NAND_CTRL_CONFIG	0x0D48
+#define BCMA_CC_NAND_CTRL_STATUS	0x0D4C
 
 /* Divider allocation in 4716/47162/5356 */
 #define BCMA_CC_PMU5_MAINPLL_CPU	1
@@ -409,6 +493,13 @@
 /* 4313 Chip specific ChipControl register bits */
 #define  BCMA_CCTRL_4313_12MA_LED_DRIVE		0x00000007	/* 12 mA drive strength for later 4313 */
 
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA			BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3		BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH		BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE	BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE	BIT(19)
+
 /* Data for the PMU, if available.
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
@@ -424,6 +515,26 @@
 	u32 window_size;
 };
 
+#ifdef CONFIG_BCMA_SFLASH
+struct bcma_sflash {
+	bool present;
+	u32 window;
+	u32 blocksize;
+	u16 numblocks;
+	u32 size;
+};
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+struct mtd_info;
+
+struct bcma_nflash {
+	bool present;
+
+	struct mtd_info *mtd;
+};
+#endif
+
 struct bcma_serial_port {
 	void *regs;
 	unsigned long clockspeed;
@@ -444,6 +555,12 @@
 	struct bcma_chipcommon_pmu pmu;
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 	struct bcma_pflash pflash;
+#ifdef CONFIG_BCMA_SFLASH
+	struct bcma_sflash sflash;
+#endif
+#ifdef CONFIG_BCMA_NFLASH
+	struct bcma_nflash nflash;
+#endif
 
 	int nr_serial_ports;
 	struct bcma_serial_port serial_ports[4];
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 5a71d57..6c9cb93 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -11,11 +11,13 @@
 #define  BCMA_CLKCTLST_HAVEHTREQ	0x00000010 /* HT available request */
 #define  BCMA_CLKCTLST_HWCROFF		0x00000020 /* Force HW clock request off */
 #define  BCMA_CLKCTLST_EXTRESREQ	0x00000700 /* Mask of external resource requests */
+#define  BCMA_CLKCTLST_EXTRESREQ_SHIFT	8
 #define  BCMA_CLKCTLST_HAVEALP		0x00010000 /* ALP available */
 #define  BCMA_CLKCTLST_HAVEHT		0x00020000 /* HT available */
 #define  BCMA_CLKCTLST_BP_ON_ALP	0x00040000 /* RO: running on ALP clock */
 #define  BCMA_CLKCTLST_BP_ON_HT		0x00080000 /* RO: running on HT clock */
 #define  BCMA_CLKCTLST_EXTRESST		0x07000000 /* Mask of external resource status */
+#define  BCMA_CLKCTLST_EXTRESST_SHIFT	24
 /* Is there any BCM4328 on BCMA bus? */
 #define  BCMA_CLKCTLST_4328A0_HAVEHT	0x00010000 /* 4328a0 has reversed bits */
 #define  BCMA_CLKCTLST_4328A0_HAVEALP	0x00020000 /* 4328a0 has reversed bits */
@@ -83,4 +85,6 @@
 							 * (2 ZettaBytes), high 32 bits
 							 */
 
+#define BCMA_SFLASH			0x1c000000
+
 #endif /* LINUX_BCMA_REGS_H_ */
diff --git a/include/linux/can.h b/include/linux/can.h
index 018055e..e52958d 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -74,20 +74,21 @@
 /*
  * defined bits for canfd_frame.flags
  *
- * As the default for CAN FD should be to support the high data rate in the
- * payload section of the frame (HDR) and to support up to 64 byte in the
- * data section (EDL) the bits are only set in the non-default case.
- * Btw. as long as there's no real implementation for CAN FD network driver
- * these bits are only preliminary.
+ * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
+ * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * the CAN controllers bitstream processor into the CAN FD mode which creates
+ * two new options within the CAN FD frame specification:
  *
- * RX: NOHDR/NOEDL - info about received CAN FD frame
- *     ESI         - bit from originating CAN controller
- * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
- *     ESI         - bit is set by local CAN controller
+ * Bit Rate Switch - to indicate a second bitrate is/was used for the payload
+ * Error State Indicator - represents the error state of the transmitting node
+ *
+ * As the CANFD_ESI bit is internally generated by the transmitting CAN
+ * controller, only the CANFD_BRS bit is relevant for real CAN controllers when
+ * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
+ * sense for virtual CAN interfaces to test applications with echoed frames.
  */
-#define CANFD_NOHDR 0x01 /* frame without high data rate */
-#define CANFD_NOEDL 0x02 /* frame without extended data length */
-#define CANFD_ESI   0x04 /* error state indicator */
+#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
+#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
 
 /**
  * struct canfd_frame - CAN flexible data rate frame structure
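
As a usage illustration, a minimal SocketCAN user-space sketch (assumptions: a CAN FD capable interface named "can0", headers that carry the new flag names, error handling trimmed) transmitting one frame with the second bitrate requested:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int send_canfd_brs_frame(void)
{
	struct canfd_frame cf = { .can_id = 0x123, .len = 64, .flags = CANFD_BRS };
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	int on = 1, s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return -1;

	/* Switch the raw socket to struct canfd_frame I/O. */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &on, sizeof(on));

	strcpy(ifr.ifr_name, "can0");		/* assumed interface name */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	memset(cf.data, 0xaa, cf.len);
	if (write(s, &cf, sizeof(cf)) != sizeof(cf)) {
		close(s);
		return -1;
	}
	close(s);
	return 0;
}
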
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 133ddcf..ef65814 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
-			bool sync);
+			bool sync, bool *contended);
 extern int compact_pgdat(pg_data_t *pgdat, int order);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 
@@ -64,7 +64,7 @@
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync)
+			bool sync, bool *contended)
 {
 	return COMPACT_CONTINUE;
 }
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 103adc6..ec45ccd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -503,6 +503,8 @@
 extern int __init efi_uart_console_only (void);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
 		struct resource *data_resource, struct resource *bss_resource);
+extern unsigned long efi_get_time(void);
+extern int efi_set_rtc_mmss(unsigned long nowtime);
 extern void efi_reserve_boot_services(void);
 extern struct efi_memory_map memmap;
 
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 21eff41..fcb4f8e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -45,8 +45,10 @@
 				 * bits) in Mbps. Please use
 				 * ethtool_cmd_speed()/_set() to
 				 * access it */
-	__u8	eth_tp_mdix;
-	__u8	reserved2;
+	__u8	eth_tp_mdix;	/* twisted pair MDI-X status */
+	__u8    eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
+				   * link should be renegotiated if necessary
+				   */
 	__u32	lp_advertising;	/* Features the link partner advertises */
 	__u32	reserved[2];
 };
@@ -1229,10 +1231,13 @@
 #define AUTONEG_DISABLE		0x00
 #define AUTONEG_ENABLE		0x01
 
-/* Mode MDI or MDI-X */
-#define ETH_TP_MDI_INVALID	0x00
-#define ETH_TP_MDI		0x01
-#define ETH_TP_MDI_X		0x02
+/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then
+ * the driver is required to renegotiate link
+ */
+#define ETH_TP_MDI_INVALID	0x00 /* status: unknown; control: unsupported */
+#define ETH_TP_MDI		0x01 /* status: MDI;     control: force MDI */
+#define ETH_TP_MDI_X		0x02 /* status: MDI-X;   control: force MDI-X */
+#define ETH_TP_MDI_AUTO		0x03 /*                  control: auto-select */
 
 /* Wake-On-Lan options. */
 #define WAKE_PHY		(1 << 0)
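
A hedged user-space sketch of driving the new control field through the existing ETHTOOL_GSET/ETHTOOL_SSET ioctls; the interface name is an assumption and the field only has an effect with drivers and headers that include this change:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int set_mdix_auto(const char *ifname)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);		/* read current settings */
	if (!ret) {
		ecmd.cmd = ETHTOOL_SSET;
		ecmd.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* driver renegotiates */
	}
	close(fd);
	return ret;
}
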
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 38dba16..aa11047 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1491,7 +1491,6 @@
 struct super_block {
 	struct list_head	s_list;		/* Keep this first */
 	dev_t			s_dev;		/* search index; _not_ kdev_t */
-	unsigned char		s_dirt;
 	unsigned char		s_blocksize_bits;
 	unsigned long		s_blocksize;
 	loff_t			s_maxbytes;	/* Max file size */
@@ -1861,7 +1860,6 @@
 	int (*drop_inode) (struct inode *);
 	void (*evict_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
-	void (*write_super) (struct super_block *);
 	int (*sync_fs)(struct super_block *sb, int wait);
 	int (*freeze_fs) (struct super_block *);
 	int (*unfreeze_fs) (struct super_block *);
@@ -2397,7 +2395,6 @@
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
 extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
-extern void sync_supers(void);
 extern void emergency_sync(void);
 extern void emergency_remount(void);
 #ifdef CONFIG_BLOCK
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index af961d6..642928c 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -306,9 +306,10 @@
 
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-		       u64 count, struct pt_regs *regs, void *head)
+		       u64 count, struct pt_regs *regs, void *head,
+		       struct task_struct *task)
 {
-	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
+	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
 }
 #endif
 
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9303348..d8c713e 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -57,6 +57,9 @@
  *
  * 7.19
  *  - add FUSE_FALLOCATE
+ *
+ * 7.20
+ *  - add FUSE_AUTO_INVAL_DATA
  */
 
 #ifndef _LINUX_FUSE_H
@@ -88,7 +91,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 19
+#define FUSE_KERNEL_MINOR_VERSION 20
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -163,10 +166,19 @@
 /**
  * INIT request/reply flags
  *
+ * FUSE_ASYNC_READ: asynchronous read requests
  * FUSE_POSIX_LOCKS: remote locking for POSIX file locks
+ * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported)
+ * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem
  * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB
  * FUSE_DONT_MASK: don't apply umask to file mode on create operations
+ * FUSE_SPLICE_WRITE: kernel supports splice write on the device
+ * FUSE_SPLICE_MOVE: kernel supports splice move on the device
+ * FUSE_SPLICE_READ: kernel supports splice read on the device
  * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks
+ * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories
+ * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages
  */
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
@@ -175,7 +187,12 @@
 #define FUSE_EXPORT_SUPPORT	(1 << 4)
 #define FUSE_BIG_WRITES		(1 << 5)
 #define FUSE_DONT_MASK		(1 << 6)
+#define FUSE_SPLICE_WRITE	(1 << 7)
+#define FUSE_SPLICE_MOVE	(1 << 8)
+#define FUSE_SPLICE_READ	(1 << 9)
 #define FUSE_FLOCK_LOCKS	(1 << 10)
+#define FUSE_HAS_IOCTL_DIR	(1 << 11)
+#define FUSE_AUTO_INVAL_DATA	(1 << 12)
 
 /**
  * CUSE INIT request/reply flags
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f309..305f23c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
  *
  * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
  * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
diff --git a/include/linux/hash.h b/include/linux/hash.h
index b80506b..24df9e7 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -67,4 +67,14 @@
 {
 	return hash_long((unsigned long)ptr, bits);
 }
+
+static inline u32 hash32_ptr(const void *ptr)
+{
+	unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+	val ^= (val >> 32);
+#endif
+	return (u32)val;
+}
 #endif /* _LINUX_HASH_H */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index f0e69c6..9adcc29 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -92,6 +92,7 @@
 #define ARPHRD_PHONET	820		/* PhoNet media type		*/
 #define ARPHRD_PHONET_PIPE 821		/* PhoNet pipe header		*/
 #define ARPHRD_CAIF	822		/* CAIF media type		*/
+#define ARPHRD_IP6GRE	823		/* GRE over IPv6		*/
 
 #define ARPHRD_VOID	  0xFFFF	/* Void type, nothing is known */
 #define ARPHRD_NONE	  0xFFFE	/* zero header length */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 6960fc1..6d88a7f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -67,6 +67,9 @@
 	struct netpoll *np;
 #endif
 
+	s32 priority; /* lower number ~ higher priority */
+	u16 queue_id;
+	struct list_head qom_list; /* node in queue override mapping list */
 	long mode_priv[0];
 };
 
@@ -96,21 +99,6 @@
 }
 #endif
 
-static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
-				      struct sk_buff *skb)
-{
-	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
-		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
-	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
-
-	skb->dev = port->dev;
-	if (unlikely(netpoll_tx_running(port->dev))) {
-		team_netpoll_send_skb(port, skb);
-		return 0;
-	}
-	return dev_queue_xmit(skb);
-}
-
 struct team_mode_ops {
 	int (*init)(struct team *team);
 	void (*exit)(struct team *team);
@@ -120,7 +108,7 @@
 	bool (*transmit)(struct team *team, struct sk_buff *skb);
 	int (*port_enter)(struct team *team, struct team_port *port);
 	void (*port_leave)(struct team *team, struct team_port *port);
-	void (*port_change_mac)(struct team *team, struct team_port *port);
+	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
 	void (*port_enabled)(struct team *team, struct team_port *port);
 	void (*port_disabled)(struct team *team, struct team_port *port);
 };
@@ -130,6 +118,7 @@
 	TEAM_OPTION_TYPE_STRING,
 	TEAM_OPTION_TYPE_BINARY,
 	TEAM_OPTION_TYPE_BOOL,
+	TEAM_OPTION_TYPE_S32,
 };
 
 struct team_option_inst_info {
@@ -146,6 +135,7 @@
 			u32 len;
 		} bin_val;
 		bool bool_val;
+		s32 s32_val;
 	} data;
 	struct team_option_inst_info *info;
 };
@@ -197,9 +187,26 @@
 
 	const struct team_mode *mode;
 	struct team_mode_ops ops;
+	bool queue_override_enabled;
+	struct list_head *qom_lists; /* array of queue override mapping lists */
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
+static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
+				      struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+	skb->dev = port->dev;
+	if (unlikely(netpoll_tx_running(team->dev))) {
+		team_netpoll_send_skb(port, skb);
+		return 0;
+	}
+	return dev_queue_xmit(skb);
+}
+
 static inline struct hlist_head *team_port_index_hash(struct team *team,
 						      int port_index)
 {
@@ -231,7 +238,7 @@
 	return NULL;
 }
 
-extern int team_port_set_team_mac(struct team_port *port);
+extern int team_port_set_team_dev_addr(struct team_port *port);
 extern int team_options_register(struct team *team,
 				 const struct team_option *option,
 				 size_t option_count);
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 5efff60..8c5035ac 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -75,6 +75,9 @@
 	IFLA_GRE_TTL,
 	IFLA_GRE_TOS,
 	IFLA_GRE_PMTUDISC,
+	IFLA_GRE_ENCAP_LIMIT,
+	IFLA_GRE_FLOWINFO,
+	IFLA_GRE_FLAGS,
 	__IFLA_GRE_MAX,
 };
 
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a810987..e6ff12d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,8 +74,6 @@
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-struct vlan_info;
-
 static inline int is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@
 				const struct net_device *by_dev);
 extern void vlan_vids_del_by_dev(struct net_device *dev,
 				 const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@
 					const struct net_device *by_dev)
 {
 }
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+	return false;
+}
 #endif
 
 /**
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index b76b4a8..be91f34 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -87,6 +87,8 @@
 #define ADF4350_MAX_BANDSEL_CLK		125000 /* Hz */
 #define ADF4350_MAX_FREQ_REFIN		250000000 /* Hz */
 #define ADF4350_MAX_MODULUS		4095
+#define ADF4350_MAX_R_CNT		1023
+
 
 /**
  * struct adf4350_platform_data - platform specific information
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 67f9dda..d032780 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -104,9 +104,14 @@
 #define IN_DEV_ANDCONF(in_dev, attr) \
 	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
 	 IN_DEV_CONF_GET((in_dev), attr))
-#define IN_DEV_ORCONF(in_dev, attr) \
-	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \
+
+#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
+	(IPV4_DEVCONF_ALL(net, attr) || \
 	 IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_ORCONF(in_dev, attr) \
+	IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
+
 #define IN_DEV_MAXCONF(in_dev, attr) \
 	(max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
 	     IN_DEV_CONF_GET((in_dev), attr)))
@@ -133,6 +138,8 @@
 					IN_DEV_ORCONF((in_dev), \
 						      PROMOTE_SECONDARIES)
 #define IN_DEV_ROUTE_LOCALNET(in_dev)	IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
+#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)	\
+	IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
 
 #define IN_DEV_RX_REDIRECTS(in_dev) \
 	((IN_DEV_FORWARD(in_dev) && \
diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
index f875b31..16625d7 100644
--- a/include/linux/input/eeti_ts.h
+++ b/include/linux/input/eeti_ts.h
@@ -2,6 +2,7 @@
 #define LINUX_INPUT_EETI_TS_H
 
 struct eeti_ts_platform_data {
+	int irq_gpio;
 	unsigned int irq_active_high;
 };
 
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 54d6d69..7e83370 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -20,6 +20,7 @@
 #define __LINUX_IOMMU_H
 
 #include <linux/errno.h>
+#include <linux/types.h>
 
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
@@ -30,6 +31,7 @@
 struct bus_type;
 struct device;
 struct iommu_domain;
+struct notifier_block;
 
 /* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index bf22b03..48af63c 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -31,4 +31,21 @@
 	struct in6_addr raddr;	/* remote tunnel end-point address */
 };
 
+struct ip6_tnl_parm2 {
+	char name[IFNAMSIZ];	/* name of tunnel device */
+	int link;		/* ifindex of underlying L2 interface */
+	__u8 proto;		/* tunnel protocol */
+	__u8 encap_limit;	/* encapsulation limit for tunnel */
+	__u8 hop_limit;		/* hop limit for tunnel */
+	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
+	__u32 flags;		/* tunnel flags */
+	struct in6_addr laddr;	/* local tunnel end-point address */
+	struct in6_addr raddr;	/* remote tunnel end-point address */
+
+	__be16			i_flags;
+	__be16			o_flags;
+	__be32			i_key;
+	__be32			o_key;
+};
+
 #endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 379e433..879db26 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -369,6 +369,7 @@
 	__u8			rcv_tclass;
 
 	__u32			dst_cookie;
+	__u32			rx_dst_cookie;
 
 	struct ipv6_mc_socklist	__rcu *ipv6_mc_list;
 	struct ipv6_ac_socklist	*ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 553fb66..216b0ba 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -349,6 +349,7 @@
 	IRQCHIP_MASK_ON_SUSPEND		= (1 <<  2),
 	IRQCHIP_ONOFFLINE_ENABLED	= (1 <<  3),
 	IRQCHIP_SKIP_SET_WAKE		= (1 <<  4),
+	IRQCHIP_ONESHOT_SAFE		= (1 <<  5),
 };
 
 /* This include will go away once we isolated irq_desc usage to core code */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f334c7f..3efc43f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1125,6 +1125,7 @@
 extern int	   jbd2_journal_recover    (journal_t *journal);
 extern int	   jbd2_journal_wipe       (journal_t *, int);
 extern int	   jbd2_journal_skip_recovery	(journal_t *);
+extern void	   jbd2_journal_update_sb_errno(journal_t *);
 extern void	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
 				unsigned long, int);
 extern void	   __jbd2_journal_abort_hard	(journal_t *);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 265e2c3..05e3c2c 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -39,9 +39,6 @@
 # error Invalid value of HZ.
 #endif
 
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH  ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */
-
 /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
  * improve accuracy by shifting LSH bits, hence calculating:
  *     (NOM << LSH) / DEN
@@ -54,18 +51,30 @@
 #define SH_DIV(NOM,DEN,LSH) (   (((NOM) / (DEN)) << (LSH))              \
                              + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
 
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+#ifdef CLOCK_TICK_RATE
+/* LATCH is used in the interval timer and ftape setup. */
+# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */
 
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+/*
+ * HZ is the requested value. However, the CLOCK_TICK_RATE may not allow
+ * for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy)
+ */
+# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
+#else
+# define SHIFTED_HZ (HZ << 8)
+#endif
+
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
 
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and	*/
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex)		*/
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+/*
+ * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
+ * a value TUSEC for TICK_USEC (can be set by adjtimex)
+ */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
 
 /* some arch's have a small-data section that can be accessed register-relative
  * but that can only take up to, say, 4-byte variables. jiffies being part of
@@ -303,7 +312,13 @@
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
 			       struct timeval *value);
+
 extern clock_t jiffies_to_clock_t(unsigned long x);
+static inline clock_t jiffies_delta_to_clock_t(long delta)
+{
+	return jiffies_to_clock_t(max(0L, delta));
+}
+
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 0647258..42d9e86 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -75,8 +75,6 @@
 #define KDB_FLAG_CATASTROPHIC	(1 << 1) /* A catastrophic event has occurred */
 #define KDB_FLAG_CMD_INTERRUPT	(1 << 2) /* Previous command was interrupted */
 #define KDB_FLAG_NOIPI		(1 << 3) /* Do not send IPIs */
-#define KDB_FLAG_ONLY_DO_DUMP	(1 << 4) /* Only do a dump, used when
-					  * kdb is off */
 #define KDB_FLAG_NO_CONSOLE	(1 << 5) /* No console is available,
 					  * kdb is disabled */
 #define KDB_FLAG_NO_VT_CONSOLE	(1 << 6) /* No VT console is available, do
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 7cccafe..6c40684 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -377,5 +377,88 @@
 extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
 			  struct mii_ioctl_data *mii_data, int cmd);
 
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_MDIO_H__ */
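
A short kernel-side sketch of where these helpers are meant to sit; the register values are assumed to have already been read from the PHY's MMD registers named in the comments:

#include <linux/ethtool.h>
#include <linux/mdio.h>

/* Fill an ethtool_eee reply from raw EEE register values. */
static void example_fill_eee(struct ethtool_eee *eee, u16 cap_reg,
			     u16 adv_reg, u16 lpa_reg)
{
	eee->supported     = mmd_eee_cap_to_ethtool_sup_t(cap_reg);  /* 3.20 */
	eee->advertised    = mmd_eee_adv_to_ethtool_adv_t(adv_reg);  /* 7.60 */
	eee->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpa_reg);  /* 7.61 */
}

/* The reverse direction, e.g. from a set_eee handler. */
static u16 example_eee_adv_reg(const struct ethtool_eee *eee)
{
	return ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
}
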
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 40c37216..32a1b5c 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -16,6 +16,7 @@
 struct pcap_platform_data {
 	unsigned int irq_base;
 	unsigned int config;
+	int gpio;
 	void (*init) (void *);	/* board specific init */
 	int num_subdevs;
 	struct pcap_subdev *subdevs;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eb06e58..ccac82e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -953,7 +953,8 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void                    (*ndo_poll_controller)(struct net_device *dev);
 	int			(*ndo_netpoll_setup)(struct net_device *dev,
-						     struct netpoll_info *info);
+						     struct netpoll_info *info,
+						     gfp_t gfp);
 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
@@ -1300,6 +1301,8 @@
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE		65536
 	unsigned int		gso_max_size;
+#define GSO_MAX_SEGS		65535
+	u16			gso_max_segs;
 
 #ifdef CONFIG_DCB
 	/* Data Center Bridging netlink ops */
@@ -1519,6 +1522,8 @@
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb);
+	bool			(*id_match)(struct packet_type *ptype,
+					    struct sock *sk);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
@@ -1548,7 +1553,7 @@
 #define NETDEV_PRE_TYPE_CHANGE	0x000E
 #define NETDEV_POST_TYPE_CHANGE	0x000F
 #define NETDEV_POST_INIT	0x0010
-#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_UNREGISTER_FINAL 0x0011
 #define NETDEV_RELEASE		0x0012
 #define NETDEV_NOTIFY_PEERS	0x0013
 #define NETDEV_JOIN		0x0014
@@ -2222,6 +2227,7 @@
  * kind of lower layer not just hardware media.
  */
 
+extern void linkwatch_init_dev(struct net_device *dev);
 extern void linkwatch_fire_event(struct net_device *dev);
 extern void linkwatch_forget_dev(struct net_device *dev);
 
@@ -2244,8 +2250,6 @@
 
 extern void netif_carrier_off(struct net_device *dev);
 
-extern void netif_notify_peers(struct net_device *dev);
-
 /**
  *	netif_dormant_on - mark device as dormant.
  *	@dev: network device
@@ -2594,8 +2598,7 @@
 extern int		dev_set_promiscuity(struct net_device *dev, int inc);
 extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
-extern int		netdev_bonding_change(struct net_device *dev,
-					      unsigned long event);
+extern void		netdev_notify_peers(struct net_device *dev);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0dfc8b7..89f2a62 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@
 				      unsigned int dataoff, unsigned int datalen,
 				      const char *name,
 				      unsigned int *matchoff, unsigned int *matchlen,
-				      union nf_inet_addr *addr);
+				      union nf_inet_addr *addr, bool delim);
 extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
 					unsigned int off, unsigned int datalen,
 					const char *name,
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 28f5389..66d5379 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -23,6 +23,7 @@
 	u8 remote_mac[ETH_ALEN];
 
 	struct list_head rx; /* rx_np list element */
+	struct rcu_head rcu;
 };
 
 struct netpoll_info {
@@ -38,28 +39,40 @@
 	struct delayed_work tx_work;
 
 	struct netpoll *netpoll;
+	struct rcu_head rcu;
 };
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
 int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
+void __netpoll_free_rcu(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-int __netpoll_rx(struct sk_buff *skb);
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	netpoll_send_skb_on_dev(np, skb, np->dev);
+	local_irq_restore(flags);
 }
 
 
 
 #ifdef CONFIG_NETPOLL
+static inline bool netpoll_rx_on(struct sk_buff *skb)
+{
+	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
+	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
+}
+
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo;
@@ -67,14 +80,14 @@
 	bool ret = false;
 
 	local_irq_save(flags);
-	npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
-	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
+	if (!netpoll_rx_on(skb))
 		goto out;
 
+	npinfo = rcu_dereference_bh(skb->dev->npinfo);
 	spin_lock(&npinfo->rx_lock);
 	/* check rx_flags again with the lock held */
-	if (npinfo->rx_flags && __netpoll_rx(skb))
+	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
 		ret = true;
 	spin_unlock(&npinfo->rx_lock);
 
@@ -83,13 +96,6 @@
 	return ret;
 }
 
-static inline int netpoll_rx_on(struct sk_buff *skb)
-{
-	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-
-	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
-}
-
 static inline int netpoll_receive_skb(struct sk_buff *skb)
 {
 	if (!list_empty(&skb->dev->napi_list))
@@ -119,7 +125,7 @@
 	}
 }
 
-static inline int netpoll_tx_running(struct net_device *dev)
+static inline bool netpoll_tx_running(struct net_device *dev)
 {
 	return irqs_disabled();
 }
@@ -127,11 +133,11 @@
 #else
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
-	return 0;
+	return false;
 }
-static inline int netpoll_rx_on(struct sk_buff *skb)
+static inline bool netpoll_rx_on(struct sk_buff *skb)
 {
-	return 0;
+	return false;
 }
 static inline int netpoll_receive_skb(struct sk_buff *skb)
 {
@@ -147,9 +153,9 @@
 static inline void netpoll_netdev_init(struct net_device *dev)
 {
 }
-static inline int netpoll_tx_running(struct net_device *dev)
+static inline bool netpoll_tx_running(struct net_device *dev)
 {
-	return 0;
+	return false;
 }
 #endif
 
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2f38788..4584162 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -565,6 +565,14 @@
  *	%NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
  *	%NL80211_ATTR_WIPHY_CHANNEL_TYPE.
  *
+ * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
+ *	its %NL80211_ATTR_WDEV identifier. It must have been created with
+ *	%NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
+ *	P2P Device can be used for P2P operations, e.g. remain-on-channel and
+ *	public action frame TX.
+ * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
+ *	its %NL80211_ATTR_WDEV identifier.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -708,6 +716,9 @@
 
 	NL80211_CMD_CH_SWITCH_NOTIFY,
 
+	NL80211_CMD_START_P2P_DEVICE,
+	NL80211_CMD_STOP_P2P_DEVICE,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1575,6 +1586,10 @@
  * @NL80211_IFTYPE_MESH_POINT: mesh point
  * @NL80211_IFTYPE_P2P_CLIENT: P2P client
  * @NL80211_IFTYPE_P2P_GO: P2P group owner
+ * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
+ *	and therefore can't be created in the normal ways, use the
+ *	%NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
+ *	commands to create and destroy one
  * @NL80211_IFTYPE_MAX: highest interface type number currently defined
  * @NUM_NL80211_IFTYPES: number of defined interface types
  *
@@ -1593,6 +1608,7 @@
 	NL80211_IFTYPE_MESH_POINT,
 	NL80211_IFTYPE_P2P_CLIENT,
 	NL80211_IFTYPE_P2P_GO,
+	NL80211_IFTYPE_P2P_DEVICE,
 
 	/* keep last */
 	NUM_NL80211_IFTYPES,
@@ -2994,12 +3010,18 @@
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
 *	to work properly to support receiving regulatory hints from
  *	cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
+ *	P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
+ *	in the interface combinations, even when it's only used for scan
+ *	and remain-on-channel. This could be due to, for example, the
+ *	remain-on-channel implementation requiring a channel context.
  */
 enum nl80211_feature_flags {
-	NL80211_FEATURE_SK_TX_STATUS	= 1 << 0,
-	NL80211_FEATURE_HT_IBSS		= 1 << 1,
-	NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
-	NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
+	NL80211_FEATURE_SK_TX_STATUS			= 1 << 0,
+	NL80211_FEATURE_HT_IBSS				= 1 << 1,
+	NL80211_FEATURE_INACTIVITY_TIMER		= 1 << 2,
+	NL80211_FEATURE_CELL_BASE_REG_HINTS		= 1 << 3,
+	NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL	= 1 << 4,
 };
 
 /**
diff --git a/include/linux/of.h b/include/linux/of.h
index 5919ee3..1b11632 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -190,10 +190,17 @@
 extern struct device_node *of_get_next_parent(struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
 					     struct device_node *prev);
+extern struct device_node *of_get_next_available_child(
+	const struct device_node *node, struct device_node *prev);
+
 #define for_each_child_of_node(parent, child) \
 	for (child = of_get_next_child(parent, NULL); child != NULL; \
 	     child = of_get_next_child(parent, child))
 
+#define for_each_available_child_of_node(parent, child) \
+	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+	     child = of_get_next_available_child(parent, child))
+
 static inline int of_get_child_count(const struct device_node *np)
 {
 	struct device_node *child;
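
A brief sketch of the new iterator in a hypothetical probe path; only children whose status property is "okay" (or absent) are visited, and the iterator drops the reference on each child it hands back:

#include <linux/of.h>

static int example_count_enabled_ports(struct device_node *sw_node)
{
	struct device_node *child;
	int ports = 0;

	for_each_available_child_of_node(sw_node, child)
		ports++;	/* disabled children are skipped entirely */

	return ports;
}
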
diff --git a/include/linux/packet_diag.h b/include/linux/packet_diag.h
new file mode 100644
index 0000000..93f5fa9
--- /dev/null
+++ b/include/linux/packet_diag.h
@@ -0,0 +1,72 @@
+#ifndef __PACKET_DIAG_H__
+#define __PACKET_DIAG_H__
+
+#include <linux/types.h>
+
+struct packet_diag_req {
+	__u8	sdiag_family;
+	__u8	sdiag_protocol;
+	__u16	pad;
+	__u32	pdiag_ino;
+	__u32	pdiag_show;
+	__u32	pdiag_cookie[2];
+};
+
+#define PACKET_SHOW_INFO	0x00000001 /* Basic packet_sk information */
+#define PACKET_SHOW_MCLIST	0x00000002 /* A set of packet_diag_mclist-s */
+#define PACKET_SHOW_RING_CFG	0x00000004 /* Rings configuration parameters */
+#define PACKET_SHOW_FANOUT	0x00000008
+
+struct packet_diag_msg {
+	__u8	pdiag_family;
+	__u8	pdiag_type;
+	__u16	pdiag_num;
+
+	__u32	pdiag_ino;
+	__u32	pdiag_cookie[2];
+};
+
+enum {
+	PACKET_DIAG_INFO,
+	PACKET_DIAG_MCLIST,
+	PACKET_DIAG_RX_RING,
+	PACKET_DIAG_TX_RING,
+	PACKET_DIAG_FANOUT,
+
+	PACKET_DIAG_MAX,
+};
+
+struct packet_diag_info {
+	__u32	pdi_index;
+	__u32	pdi_version;
+	__u32	pdi_reserve;
+	__u32	pdi_copy_thresh;
+	__u32	pdi_tstamp;
+	__u32	pdi_flags;
+
+#define PDI_RUNNING	0x1
+#define PDI_AUXDATA	0x2
+#define PDI_ORIGDEV	0x4
+#define PDI_VNETHDR	0x8
+#define PDI_LOSS	0x10
+};
+
+struct packet_diag_mclist {
+	__u32	pdmc_index;
+	__u32	pdmc_count;
+	__u16	pdmc_type;
+	__u16	pdmc_alen;
+	__u8	pdmc_addr[MAX_ADDR_LEN];
+};
+
+struct packet_diag_ring {
+	__u32	pdr_block_size;
+	__u32	pdr_block_nr;
+	__u32	pdr_frame_size;
+	__u32	pdr_frame_nr;
+	__u32	pdr_retire_tmo;
+	__u32	pdr_sizeof_priv;
+	__u32	pdr_features;
+};
+
+#endif
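
A user-space sketch of querying this new interface over NETLINK_SOCK_DIAG; reply parsing is omitted, and MAX_ADDR_LEN is defined locally because the header above only pulls in <linux/types.h>:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#ifndef MAX_ADDR_LEN
#define MAX_ADDR_LEN 32			/* used by struct packet_diag_mclist */
#endif
#include <linux/packet_diag.h>

int request_packet_socket_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct packet_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_PACKET,
			.pdiag_show   = PACKET_SHOW_INFO | PACKET_SHOW_MCLIST,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (send(fd, &msg, sizeof(msg), 0) < 0) {	/* dump request to the kernel */
		close(fd);
		return -1;
	}
	/* A recv() loop parsing packet_diag_msg replies plus attributes follows here. */
	close(fd);
	return 0;
}
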
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 76c5c8b..7602ccb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1272,7 +1272,8 @@
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
-			  struct hlist_head *head, int rctx);
+			  struct hlist_head *head, int rctx,
+			  struct task_struct *task);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 6dd96fb..e9b7f43 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -20,6 +20,7 @@
 /* This struct is private to the core and should be regarded as a cookie */
 struct pinctrl;
 struct pinctrl_state;
+struct device;
 
 #ifdef CONFIG_PINCTRL
 
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 6fdf027..0ec590b 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -354,6 +354,37 @@
 }
 #endif /* RFKILL || RFKILL_MODULE */
 
+
+#ifdef CONFIG_RFKILL_LEDS
+/**
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
+ * This function might return a NULL pointer if registering of the
+ * LED trigger failed. Use this as "default_trigger" for the LED.
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called, it
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
+#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+	return NULL;
+}
+
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* RFKILL_H */
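
A driver-side sketch (hypothetical names throughout) of the intended call order, with the trigger name set before registration:

#include <linux/errno.h>
#include <linux/rfkill.h>

static struct rfkill *example_wlan_rfkill;

static int example_rfkill_setup(struct device *dev, const struct rfkill_ops *ops)
{
	int err;

	example_wlan_rfkill = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN,
					   ops, NULL);
	if (!example_wlan_rfkill)
		return -ENOMEM;

	/* Must happen before rfkill_register() to take effect. */
	rfkill_set_led_trigger_name(example_wlan_rfkill, "example-wlan-led");

	err = rfkill_register(example_wlan_rfkill);
	if (err)
		rfkill_destroy(example_wlan_rfkill);
	return err;
}
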
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c147e70..b8c8664 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -334,14 +334,6 @@
 }
 #endif
 
-#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
-void lockup_detector_bootcpu_resume(void);
-#else
-static inline void lockup_detector_bootcpu_resume(void)
-{
-}
-#endif
-
 #ifdef CONFIG_DETECT_HUNG_TASK
 extern unsigned int  sysctl_hung_task_panic;
 extern unsigned long sysctl_hung_task_check_count;
diff --git a/include/linux/security.h b/include/linux/security.h
index 4e5a73c..3dea6a9 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1242,8 +1242,6 @@
  *	Check that the @parent process has sufficient permission to trace the
  *	current process before allowing the current process to present itself
  *	to the @parent process for tracing.
- *	The parent process will still have to undergo the ptrace_access_check
- *	checks before it is allowed to trace this one.
  *	@parent contains the task_struct structure for debugger process.
  *	Return 0 if permission is granted.
  * @capget:
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7632c87..b33a3a1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -846,13 +846,16 @@
  *
  *	NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-					      gfp_t pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 {
 	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_shared(skb)) {
 		struct sk_buff *nskb = skb_clone(skb, pri);
-		kfree_skb(skb);
+
+		if (likely(nskb))
+			consume_skb(skb);
+		else
+			kfree_skb(skb);
 		skb = nskb;
 	}
 	return skb;
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 00bc189..ad6e3a6 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -18,7 +18,14 @@
 enum
 {
 	IPSTATS_MIB_NUM = 0,
+/* frequently written fields in fast path, kept in same cache line */
 	IPSTATS_MIB_INPKTS,			/* InReceives */
+	IPSTATS_MIB_INOCTETS,			/* InOctets */
+	IPSTATS_MIB_INDELIVERS,			/* InDelivers */
+	IPSTATS_MIB_OUTFORWDATAGRAMS,		/* OutForwDatagrams */
+	IPSTATS_MIB_OUTPKTS,			/* OutRequests */
+	IPSTATS_MIB_OUTOCTETS,			/* OutOctets */
+/* other fields */
 	IPSTATS_MIB_INHDRERRORS,		/* InHdrErrors */
 	IPSTATS_MIB_INTOOBIGERRORS,		/* InTooBigErrors */
 	IPSTATS_MIB_INNOROUTES,			/* InNoRoutes */
@@ -26,9 +33,6 @@
 	IPSTATS_MIB_INUNKNOWNPROTOS,		/* InUnknownProtos */
 	IPSTATS_MIB_INTRUNCATEDPKTS,		/* InTruncatedPkts */
 	IPSTATS_MIB_INDISCARDS,			/* InDiscards */
-	IPSTATS_MIB_INDELIVERS,			/* InDelivers */
-	IPSTATS_MIB_OUTFORWDATAGRAMS,		/* OutForwDatagrams */
-	IPSTATS_MIB_OUTPKTS,			/* OutRequests */
 	IPSTATS_MIB_OUTDISCARDS,		/* OutDiscards */
 	IPSTATS_MIB_OUTNOROUTES,		/* OutNoRoutes */
 	IPSTATS_MIB_REASMTIMEOUT,		/* ReasmTimeout */
@@ -42,8 +46,6 @@
 	IPSTATS_MIB_OUTMCASTPKTS,		/* OutMcastPkts */
 	IPSTATS_MIB_INBCASTPKTS,		/* InBcastPkts */
 	IPSTATS_MIB_OUTBCASTPKTS,		/* OutBcastPkts */
-	IPSTATS_MIB_INOCTETS,			/* InOctets */
-	IPSTATS_MIB_OUTOCTETS,			/* OutOctets */
 	IPSTATS_MIB_INMCASTOCTETS,		/* InMcastOctets */
 	IPSTATS_MIB_OUTMCASTOCTETS,		/* OutMcastOctets */
 	IPSTATS_MIB_INBCASTOCTETS,		/* InBcastOctets */
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 1a6b004..c2b02a5 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -504,7 +504,9 @@
 #define SSB_CHIPCO_FLASHCTL_ST_SE	0x02D8		/* Sector Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_BE	0x00C7		/* Bulk Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_DP	0x00B9		/* Deep Power-down */
-#define SSB_CHIPCO_FLASHCTL_ST_RSIG	0x03AB		/* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_RES	0x03AB		/* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_CSA	0x1000		/* Keep chip select asserted */
+#define SSB_CHIPCO_FLASHCTL_ST_SSE	0x0220		/* Sub-sector Erase */
 
 /* Status register bits for ST flashes */
 #define SSB_CHIPCO_FLASHSTA_ST_WIP	0x01		/* Write In Progress */
diff --git a/include/linux/string.h b/include/linux/string.h
index ffe0442..b917881 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -144,8 +144,8 @@
 {
 	return strncmp(str, prefix, strlen(prefix)) == 0;
 }
-#endif
 
 extern size_t memweight(const void *ptr, size_t bytes);
 
+#endif /* __KERNEL__ */
 #endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 99bc88b..7c5ceb2 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,7 +232,7 @@
  * estimated error = NTP dispersion.
  */
 extern unsigned long tick_usec;		/* USER_HZ period (usec) */
-extern unsigned long tick_nsec;		/* ACTHZ          period (nsec) */
+extern unsigned long tick_nsec;		/* SHIFTED_HZ period (nsec) */
 
 extern void ntp_init(void);
 extern void ntp_clear(void);
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index c989284..0b1e3f2 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -89,8 +89,8 @@
 
 #define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
 #define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* obsoleted */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_NODES     0x4009    /* obsoleted */
@@ -115,8 +115,8 @@
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* obsoleted */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_NODES     0x8009    /* obsoleted */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43..fec12d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@
 				| 0*SD_SHARE_CPUPOWER			\
 				| 0*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
+				| 1*SD_PREFER_SIBLING			\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index c66fe33..50c3e8f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -104,7 +104,6 @@
 	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
 }
 
-
 /*
  * mm/page-writeback.c
  */
diff --git a/include/net/arp.h b/include/net/arp.h
index 7f7df93..b630dae 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -3,6 +3,7 @@
 #define _ARP_H
 
 #include <linux/if_arp.h>
+#include <linux/hash.h>
 #include <net/neighbour.h>
 
 
@@ -10,7 +11,7 @@
 
 static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
 {
-	u32 val = key ^ dev->ifindex;
+	u32 val = key ^ hash32_ptr(dev);
 
 	return val * hash_rnd;
 }
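
A stand-alone illustration of why the device pointer is folded to 32 bits before the multiplicative hash; types are from <stdint.h> and the shift parameter mirrors the neighbour table's hash_shift:

#include <stdint.h>

static inline uint32_t fold_ptr32(const void *p)
{
	uintptr_t v = (uintptr_t)p;

	if (sizeof(v) > 4)		/* 64-bit: mix the high half in */
		v ^= v >> 16 >> 16;	/* split shift: defined on 32-bit too */
	return (uint32_t)v;
}

/* hash_shift is the bucket-count log2, expected in 1..31. */
static inline uint32_t example_arp_bucket(uint32_t key, const void *dev,
					  uint32_t hash_rnd,
					  unsigned int hash_shift)
{
	return ((key ^ fold_ptr32(dev)) * hash_rnd) >> (32 - hash_shift);
}
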
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 565d4be..ede0369 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -27,6 +27,7 @@
 
 #include <linux/poll.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #ifndef AF_BLUETOOTH
 #define AF_BLUETOOTH	31
@@ -202,6 +203,10 @@
 struct bt_sock_list {
 	struct hlist_head head;
 	rwlock_t          lock;
+#ifdef CONFIG_PROC_FS
+	struct file_operations   fops;
+	int (*custom_seq_show)(struct seq_file *, void *);
+#endif
 };
 
 int  bt_sock_register(int proto, const struct net_proto_family *ops);
@@ -292,6 +297,11 @@
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
+extern int  bt_procfs_init(struct module *module, struct net *net, const char *name,
+			   struct bt_sock_list *sk_list,
+			   int (*seq_show)(struct seq_file *, void *));
+extern void bt_procfs_cleanup(struct net *net, const char *name);
+
 extern struct dentry *bt_debugfs;
 
 int l2cap_init(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ccd723e..23cf413 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -62,6 +62,15 @@
 /* First BR/EDR Controller shall have ID = 0 */
 #define HCI_BREDR_ID	0
 
+/* AMP controller status */
+#define AMP_CTRL_POWERED_DOWN			0x00
+#define AMP_CTRL_BLUETOOTH_ONLY			0x01
+#define AMP_CTRL_NO_CAPACITY			0x02
+#define AMP_CTRL_LOW_CAPACITY			0x03
+#define AMP_CTRL_MEDIUM_CAPACITY		0x04
+#define AMP_CTRL_HIGH_CAPACITY			0x05
+#define AMP_CTRL_FULL_CAPACITY			0x06
+
 /* HCI device quirks */
 enum {
 	HCI_QUIRK_RESET_ON_CLOSE,
@@ -1295,6 +1304,8 @@
 } __packed;
 
 /* Low energy meta events */
+#define LE_CONN_ROLE_MASTER	0x00
+
 #define HCI_EV_LE_CONN_COMPLETE		0x01
 struct hci_ev_le_conn_complete {
 	__u8     status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 475b8c0..41d9439 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -115,12 +115,6 @@
 	u8 randomizer[16];
 };
 
-struct adv_entry {
-	struct list_head list;
-	bdaddr_t bdaddr;
-	u8 bdaddr_type;
-};
-
 struct le_scan_params {
 	u8 type;
 	u16 interval;
@@ -356,16 +350,16 @@
 
 /* ----- HCI interface to upper protocols ----- */
 extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
 extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
 extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
 extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
 			      u16 flags);
 
 extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
 extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
@@ -587,8 +581,7 @@
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
-	BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-	       atomic_read(&conn->refcnt) + 1);
+	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
 	atomic_inc(&conn->refcnt);
 	cancel_delayed_work(&conn->disc_work);
@@ -596,8 +589,7 @@
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
-	BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-	       atomic_read(&conn->refcnt) - 1);
+	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
 	if (atomic_dec_and_test(&conn->refcnt)) {
 		unsigned long timeo;
@@ -1056,7 +1048,7 @@
 int mgmt_interleaved_discovery(struct hci_dev *hdev);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-
+bool mgmt_valid_hdev(struct hci_dev *hdev);
 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
 
 /* HCI info for socket */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index a7679f8..d206296 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -671,20 +671,8 @@
 	L2CAP_EV_RECV_FRAME,
 };
 
-static inline void l2cap_chan_hold(struct l2cap_chan *c)
-{
-	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-	atomic_inc(&c->refcnt);
-}
-
-static inline void l2cap_chan_put(struct l2cap_chan *c)
-{
-	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-	if (atomic_dec_and_test(&c->refcnt))
-		kfree(c);
-}
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
 {
@@ -771,7 +759,6 @@
 
 struct l2cap_chan *l2cap_chan_create(void);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
-void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 		       bdaddr_t *dst, u8 dst_type);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index ca356a7..50993a5 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -108,8 +108,8 @@
 #define SMP_CONFIRM_FAILED		0x04
 #define SMP_PAIRING_NOTSUPP		0x05
 #define SMP_ENC_KEY_SIZE		0x06
-#define SMP_CMD_NOTSUPP		0x07
-#define SMP_UNSPECIFIED		0x08
+#define SMP_CMD_NOTSUPP			0x07
+#define SMP_UNSPECIFIED			0x08
 #define SMP_REPEATED_ATTEMPTS		0x09
 
 #define SMP_MIN_ENC_KEY_SIZE		7
@@ -123,8 +123,8 @@
 	struct l2cap_conn *conn;
 	u8		preq[7]; /* SMP Pairing Request */
 	u8		prsp[7]; /* SMP Pairing Response */
-	u8              prnd[16]; /* SMP Pairing Random (local) */
-	u8              rrnd[16]; /* SMP Pairing Random (remote) */
+	u8		prnd[16]; /* SMP Pairing Random (local) */
+	u8		rrnd[16]; /* SMP Pairing Random (remote) */
 	u8		pcnf[16]; /* SMP Pairing Confirm */
 	u8		tk[16]; /* SMP Temporary Key */
 	u8		enc_key_size;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 493fa0c..ba2e616 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,7 @@
  * 	is not permitted.
  * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
  * 	is not permitted.
+ * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
  */
 enum ieee80211_channel_flags {
 	IEEE80211_CHAN_DISABLED		= 1<<0,
@@ -104,6 +105,7 @@
 	IEEE80211_CHAN_RADAR		= 1<<3,
 	IEEE80211_CHAN_NO_HT40PLUS	= 1<<4,
 	IEEE80211_CHAN_NO_HT40MINUS	= 1<<5,
+	IEEE80211_CHAN_NO_OFDM		= 1<<6,
 };
 
 #define IEEE80211_CHAN_NO_HT40 \
@@ -1437,7 +1439,8 @@
  * @add_virtual_intf: create a new virtual interface with the given name,
  *	must set the struct wireless_dev's iftype. Beware: You must create
  *	the new netdev in the wiphy's network namespace! Returns the struct
- *	wireless_dev, or an ERR_PTR.
+ *	wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ *	also set the address member in the wdev.
  *
  * @del_virtual_intf: remove the virtual interface
  *
@@ -1616,6 +1619,9 @@
  * @get_channel: Get the current operating channel for the virtual interface.
  *	For monitor interfaces, it should return %NULL unless there's a single
  *	current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1832,6 +1838,11 @@
 		(*get_channel)(struct wiphy *wiphy,
 			       struct wireless_dev *wdev,
 			       enum nl80211_channel_type *type);
+
+	int	(*start_p2p_device)(struct wiphy *wiphy,
+				    struct wireless_dev *wdev);
+	void	(*stop_p2p_device)(struct wiphy *wiphy,
+				   struct wireless_dev *wdev);
 };
 
 /*
@@ -2395,6 +2406,8 @@
  * @cleanup_work: work struct used for cleanup that can't be done directly
  * @beacon_interval: beacon interval used on this device for transmitting
  *	beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @p2p_started: true if this is a P2P Device that has been started
  */
 struct wireless_dev {
 	struct wiphy *wiphy;
@@ -2413,7 +2426,9 @@
 
 	struct work_struct cleanup_work;
 
-	bool use_4addr;
+	bool use_4addr, p2p_started;
+
+	u8 address[ETH_ALEN] __aligned(sizeof(u16));
 
 	/* currently used for IBSS and SME - might be rearranged later */
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2461,6 +2476,13 @@
 #endif
 };
 
+static inline u8 *wdev_address(struct wireless_dev *wdev)
+{
+	if (wdev->netdev)
+		return wdev->netdev->dev_addr;
+	return wdev->address;
+}
+
 /**
  * wdev_priv - return wiphy priv from wireless_dev
  *
@@ -3528,6 +3550,22 @@
  */
 u32 cfg80211_calculate_bitrate(struct rate_info *rate);
 
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
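
Following the kernel-doc for cfg80211_unregister_wdev() above, a driver that allocated a netdev-less P2P Device wdev might tear it down along these lines; mydrv_remove_p2p_device() is a hypothetical helper, shown only to illustrate the RTNL requirement.

static void mydrv_remove_p2p_device(struct wireless_dev *wdev)
{
	/* Only valid for wdevs without a netdev, e.g. P2P Devices. */
	rtnl_lock();
	cfg80211_unregister_wdev(wdev);
	rtnl_unlock();

	kfree(wdev);	/* the driver owns the allocation */
}
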
diff --git a/include/net/codel.h b/include/net/codel.h
index 550debf..389cf62 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -305,6 +305,8 @@
 			}
 		}
 	} else if (drop) {
+		u32 delta;
+
 		if (params->ecn && INET_ECN_set_ce(skb)) {
 			stats->ecn_mark++;
 		} else {
@@ -320,9 +322,11 @@
 		 * assume that the drop rate that controlled the queue on the
 		 * last cycle is a good starting point to control it now.
 		 */
-		if (codel_time_before(now - vars->drop_next,
+		delta = vars->count - vars->lastcount;
+		if (delta > 1 &&
+		    codel_time_before(now - vars->drop_next,
 				      16 * params->interval)) {
-			vars->count = (vars->count - vars->lastcount) | 1;
+			vars->count = delta;
 			/* we dont care if rec_inv_sqrt approximation
 			 * is not very precise :
 			 * Next Newton steps will correct it quadratically.
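
The re-arm logic above now computes delta = count - lastcount once and reuses it only when more than one drop happened in the previous cycle; otherwise the path not shown here is assumed to fall back to restarting from 1. The old code OR-ed in 1 unconditionally, which also perturbed even deltas. A small stand-alone comparison of the two starting values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t deltas[] = { 0, 1, 2, 6, 7 };
	unsigned int i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		uint32_t d = deltas[i];
		uint32_t old_start = d | 1;		/* previous behaviour */
		uint32_t new_start = d > 1 ? d : 1;	/* assumed fallback to 1 */

		printf("delta=%u  old=%u  new=%u\n", d, old_start, new_start);
	}
	return 0;
}
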
diff --git a/include/net/dst.h b/include/net/dst.h
index baf5978..9a78810 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,7 +110,7 @@
 };
 
 extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[RTAX_MAX];
+extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY	0x1UL
 #define __DST_METRICS_PTR(Y)	\
@@ -396,11 +396,15 @@
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
 				   struct sk_buff *skb)
 {
-	struct hh_cache *hh;
+	const struct hh_cache *hh;
 
-	if (unlikely(dst->pending_confirm)) {
-		n->confirmed = jiffies;
+	if (dst->pending_confirm) {
+		unsigned long now = jiffies;
+
 		dst->pending_confirm = 0;
+		/* avoid dirtying neighbour */
+		if (n->confirmed != now)
+			n->confirmed = now;
 	}
 
 	hh = &n->hh;
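
The confirmation path above now writes n->confirmed only when the value actually changes, so a neighbour entry shared across CPUs is not dirtied on every transmit. A sketch of the same write-avoidance pattern, with a made-up helper name:

static inline void confirm_once_per_jiffy(unsigned long *confirmed)
{
	unsigned long now = jiffies;

	/* Skip the store when nothing would change; this avoids dirtying
	 * a cache line that other CPUs may be reading. */
	if (*confirmed != now)
		*confirmed = now;
}
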
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 7139254..7f0df13 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -183,6 +183,9 @@
  *     Contains a bitmap of known fields/flags, the flags, and
  *     the MCS index.
  *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS	u32, u16, u8, u8	unitless
+ *
+ *	Contains the AMPDU information for the subframe.
  */
 enum ieee80211_radiotap_type {
 	IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@
 	IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
 	IEEE80211_RADIOTAP_MCS = 19,
+	IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
 
 	/* valid in every it_present bitmap, even vendor namespaces */
 	IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@
 #define IEEE80211_RADIOTAP_MCS_FMT_GF		0x08
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC		0x10
 
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN		0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN		0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN		0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST		0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR		0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN	0x0020
 
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5ee66f5..ba1d361 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -39,6 +39,7 @@
 	int	    (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
 	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
 	int	    (*rebuild_header)(struct sock *sk);
+	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
 	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83b567f..613cfa4 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,13 +249,4 @@
 	return flags;
 }
 
-static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
-{
-	struct dst_entry *dst = skb_dst(skb);
-
-	dst_hold(dst);
-	sk->sk_rx_dst = dst;
-	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-}
-
 #endif	/* _INET_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index bd5e444a..5a5d84d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -120,7 +120,7 @@
 				      struct flowi4 *fl4,
 				      struct sk_buff_head *queue,
 				      struct inet_cork *cork);
-extern int		ip_send_skb(struct sk_buff *skb);
+extern int		ip_send_skb(struct net *net, struct sk_buff *skb);
 extern int		ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
 extern void		ip_flush_pending_frames(struct sock *sk);
 extern struct sk_buff  *ip_make_skb(struct sock *sk,
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 358fb86..e03047f 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -5,6 +5,8 @@
 #include <linux/netdevice.h>
 #include <linux/ip6_tunnel.h>
 
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
 /* capable of sending packets */
 #define IP6_TNL_F_CAP_XMIT 0x10000
 /* capable of receiving packets */
@@ -12,15 +14,40 @@
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
-/* IPv6 tunnel */
+struct __ip6_tnl_parm {
+	char name[IFNAMSIZ];	/* name of tunnel device */
+	int link;		/* ifindex of underlying L2 interface */
+	__u8 proto;		/* tunnel protocol */
+	__u8 encap_limit;	/* encapsulation limit for tunnel */
+	__u8 hop_limit;		/* hop limit for tunnel */
+	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
+	__u32 flags;		/* tunnel flags */
+	struct in6_addr laddr;	/* local tunnel end-point address */
+	struct in6_addr raddr;	/* remote tunnel end-point address */
 
+	__be16			i_flags;
+	__be16			o_flags;
+	__be32			i_key;
+	__be32			o_key;
+};
+
+/* IPv6 tunnel */
 struct ip6_tnl {
 	struct ip6_tnl __rcu *next;	/* next tunnel in list */
 	struct net_device *dev;	/* virtual device associated with tunnel */
-	struct ip6_tnl_parm parms;	/* tunnel configuration parameters */
+	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
 	struct flowi fl;	/* flowi template for xmit */
 	struct dst_entry *dst_cache;    /* cached dst */
 	u32 dst_cookie;
+
+	int err_count;
+	unsigned long err_time;
+
+	/* These fields used only by GRE */
+	__u32 i_seqno;	/* The last seen seqno	*/
+	__u32 o_seqno;	/* The last output seqno */
+	int hlen;       /* Precalculated GRE header length */
+	int mlink;
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@
 	__u8 encap_limit;	/* tunnel encapsulation limit   */
 } __packed;
 
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+void ip6_tnl_dst_reset(struct ip6_tnl *t);
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+		const struct in6_addr *raddr);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+			     const struct in6_addr *raddr);
+
 #endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 95374d1..ee75ccd 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -808,8 +808,6 @@
 	struct list_head	rs_table[IP_VS_RTAB_SIZE];
 	/* ip_vs_app */
 	struct list_head	app_list;
-	/* ip_vs_ftp */
-	struct ip_vs_app	*ftp_app;
 	/* ip_vs_proto */
 	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
 	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
@@ -890,6 +888,7 @@
 	unsigned int		sysctl_sync_refresh_period;
 	int			sysctl_sync_retries;
 	int			sysctl_nat_icmp_send;
+	int			sysctl_pmtu_disc;
 
 	/* ip_vs_lblc */
 	int			sysctl_lblc_expiration;
@@ -976,6 +975,11 @@
 	return ipvs->sysctl_sync_sock_size;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_pmtu_disc;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1018,6 +1022,11 @@
 	return 0;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+	return 1;
+}
+
 #endif
 
 /*
@@ -1179,7 +1188,8 @@
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern struct ip_vs_app *register_ip_vs_app(struct net *net,
+					    struct ip_vs_app *app);
 extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index c8a2024..9bed5d4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -34,6 +34,7 @@
 #define NEXTHDR_IPV6		41	/* IPv6 in IPv6 */
 #define NEXTHDR_ROUTING		43	/* Routing header. */
 #define NEXTHDR_FRAGMENT	44	/* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE		47	/* GRE header. */
 #define NEXTHDR_ESP		50	/* Encapsulating security payload. */
 #define NEXTHDR_AUTH		51	/* Authentication header. */
 #define NEXTHDR_ICMP		58	/* ICMP for IPv6. */
diff --git a/include/net/llc.h b/include/net/llc.h
index 226c846..f2d0fc5 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -133,7 +133,7 @@
 extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
 extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_station_init(void);
+extern void llc_station_init(void);
 extern void llc_station_exit(void);
 
 #ifdef CONFIG_PROC_FS
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bb86aa6..71f8262 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -171,6 +171,7 @@
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
  * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
@@ -190,6 +191,7 @@
 	BSS_CHANGED_IDLE		= 1<<14,
 	BSS_CHANGED_SSID		= 1<<15,
 	BSS_CHANGED_AP_PROBE_RESP	= 1<<16,
+	BSS_CHANGED_PS			= 1<<17,
 
 	/* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -266,6 +268,8 @@
  * @idle: This interface is idle. There's also a global idle flag in the
  *	hardware config which may be more appropriate depending on what
  *	your driver/device needs to do.
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ *	offchannel/dynamic_ps operations.
  * @ssid: The SSID of the current vif. Only valid in AP-mode.
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@
 	bool arp_filter_enabled;
 	bool qos;
 	bool idle;
+	bool ps;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	size_t ssid_len;
 	bool hidden_ssid;
@@ -522,9 +527,6 @@
  *  (2) driver internal use (if applicable)
  *  (3) TX status information - driver tells mac80211 what happened
  *
- * The TX control's sta pointer is only valid during the ->tx call,
- * it may be NULL.
- *
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@
 					struct ieee80211_tx_rate rates[
 						IEEE80211_TX_MAX_RATES];
 					s8 rts_cts_rate_idx;
+					/* 3 bytes free */
 				};
 				/* only needed before rate control */
 				unsigned long jiffies;
@@ -562,7 +565,7 @@
 			/* NB: vif can be NULL for injected frames */
 			struct ieee80211_vif *vif;
 			struct ieee80211_key_conf *hw_key;
-			struct ieee80211_sta *sta;
+			/* 8 bytes free */
 		} control;
 		struct {
 			struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@
  * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
  *	the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
  *	to hw.radiotap_mcs_details to advertise that fact
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ *	number (@ampdu_reference) must be populated and be a distinct number for
+ *	each A-MPDU
+ * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
+ * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
+ *	monitoring purposes only
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ *	subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ *	on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ *	is stored in the @ampdu_delimiter_crc field)
  */
 enum mac80211_rx_flags {
-	RX_FLAG_MMIC_ERROR	= 1<<0,
-	RX_FLAG_DECRYPTED	= 1<<1,
-	RX_FLAG_MMIC_STRIPPED	= 1<<3,
-	RX_FLAG_IV_STRIPPED	= 1<<4,
-	RX_FLAG_FAILED_FCS_CRC	= 1<<5,
-	RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-	RX_FLAG_MACTIME_MPDU	= 1<<7,
-	RX_FLAG_SHORTPRE	= 1<<8,
-	RX_FLAG_HT		= 1<<9,
-	RX_FLAG_40MHZ		= 1<<10,
-	RX_FLAG_SHORT_GI	= 1<<11,
-	RX_FLAG_NO_SIGNAL_VAL	= 1<<12,
-	RX_FLAG_HT_GF		= 1<<13,
+	RX_FLAG_MMIC_ERROR		= BIT(0),
+	RX_FLAG_DECRYPTED		= BIT(1),
+	RX_FLAG_MMIC_STRIPPED		= BIT(3),
+	RX_FLAG_IV_STRIPPED		= BIT(4),
+	RX_FLAG_FAILED_FCS_CRC		= BIT(5),
+	RX_FLAG_FAILED_PLCP_CRC		= BIT(6),
+	RX_FLAG_MACTIME_MPDU		= BIT(7),
+	RX_FLAG_SHORTPRE		= BIT(8),
+	RX_FLAG_HT			= BIT(9),
+	RX_FLAG_40MHZ			= BIT(10),
+	RX_FLAG_SHORT_GI		= BIT(11),
+	RX_FLAG_NO_SIGNAL_VAL		= BIT(12),
+	RX_FLAG_HT_GF			= BIT(13),
+	RX_FLAG_AMPDU_DETAILS		= BIT(14),
+	RX_FLAG_AMPDU_REPORT_ZEROLEN	= BIT(15),
+	RX_FLAG_AMPDU_IS_ZEROLEN	= BIT(16),
+	RX_FLAG_AMPDU_LAST_KNOWN	= BIT(17),
+	RX_FLAG_AMPDU_IS_LAST		= BIT(18),
+	RX_FLAG_AMPDU_DELIM_CRC_ERROR	= BIT(19),
+	RX_FLAG_AMPDU_DELIM_CRC_KNOWN	= BIT(20),
 };
 
 /**
@@ -711,17 +734,22 @@
  *	HT rates are use (RX_FLAG_HT)
  * @flag: %RX_FLAG_*
  * @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ *	each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
  */
 struct ieee80211_rx_status {
 	u64 mactime;
 	u32 device_timestamp;
-	u16 flag;
+	u32 ampdu_reference;
+	u32 flag;
 	u16 freq;
 	u8 rate_idx;
 	u8 rx_flags;
 	u8 band;
 	u8 antenna;
 	s8 signal;
+	u8 ampdu_delimiter_crc;
 };
 
 /**
@@ -1074,6 +1102,16 @@
 };
 
 /**
+ * struct ieee80211_tx_control - TX control data
+ *
+ * @sta: station table entry; this sta pointer may be NULL, and
+ * 	it must not be copied because it is protected by RCU.
+ */
+struct ieee80211_tx_control {
+	struct ieee80211_sta *sta;
+};
+
+/**
  * enum ieee80211_hw_flags - hardware flags
  *
  * These flags are used to indicate hardware capabilities to
@@ -1203,6 +1241,10 @@
  *	queue mapping in order to use different queues (not just one per AC)
  *	for different virtual interfaces. See the doc section on HW queue
  *	control for more details.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ *	P2P Interface. This will be honoured even if more than one interface
+ *	is supported.
  */
 enum ieee80211_hw_flags {
 	IEEE80211_HW_HAS_RATE_CONTROL			= 1<<0,
@@ -1230,6 +1272,7 @@
 	IEEE80211_HW_AP_LINK_PS				= 1<<22,
 	IEEE80211_HW_TX_AMPDU_SETUP_IN_HW		= 1<<23,
 	IEEE80211_HW_SCAN_WHILE_IDLE			= 1<<24,
+	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF		= 1<<25,
 };
 
 /**
@@ -1884,10 +1927,14 @@
  * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
  *	to this station changed.
  * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ *	changed (in IBSS mode) due to discovering more information about
+ *	the peer.
  */
 enum ieee80211_rate_control_changed {
 	IEEE80211_RC_BW_CHANGED		= BIT(0),
 	IEEE80211_RC_SMPS_CHANGED	= BIT(1),
+	IEEE80211_RC_SUPP_RATES_CHANGED	= BIT(2),
 };
 
 /**
@@ -2264,7 +2311,9 @@
  *	The callback is optional and can (should!) sleep.
  */
 struct ieee80211_ops {
-	void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+	void (*tx)(struct ieee80211_hw *hw,
+		   struct ieee80211_tx_control *control,
+		   struct sk_buff *skb);
 	int (*start)(struct ieee80211_hw *hw);
 	void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
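
With the new tx() signature, the station is handed to the driver through struct ieee80211_tx_control rather than the skb's control block. A hypothetical driver callback is sketched below; mydrv_tx() and its body are illustrative, the only point being that control->sta is used within the call and never stored.

static void mydrv_tx(struct ieee80211_hw *hw,
		     struct ieee80211_tx_control *control,
		     struct sk_buff *skb)
{
	struct ieee80211_sta *sta = control->sta;	/* may be NULL */

	/* Build the hardware descriptor using sta (rates, keys, ...)
	 * strictly within this call; the pointer is RCU-protected and
	 * must not be kept or copied. */
	(void)hw;
	(void)sta;

	dev_kfree_skb_any(skb);	/* placeholder: a real driver queues the frame */
}
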
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 96a3b5c..980d263 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -49,6 +49,7 @@
 #include <linux/types.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/hash.h>
 
 #include <net/neighbour.h>
 
@@ -134,7 +135,7 @@
 {
 	const u32 *p32 = pkey;
 
-	return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+	return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
 		(p32[1] * hash_rnd[1]) +
 		(p32[2] * hash_rnd[2]) +
 		(p32[3] * hash_rnd[3]));
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 344d898..0dab173 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -334,18 +334,22 @@
 }
 #endif
 
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
 	unsigned int seq;
 	int hh_len;
 
 	do {
-		int hh_alen;
-
 		seq = read_seqbegin(&hh->hh_lock);
 		hh_len = hh->hh_len;
-		hh_alen = HH_DATA_ALIGN(hh_len);
-		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+		if (likely(hh_len <= HH_DATA_MOD)) {
+			/* this is inlined by gcc */
+			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+		} else {
+			int hh_alen = HH_DATA_ALIGN(hh_len);
+
+			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+		}
 	} while (read_seqretry(&hh->hh_lock, seq));
 
 	skb_push(skb, hh_len);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ae1cd6c..5ae57f1 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -15,6 +15,7 @@
 #include <net/netns/packet.h>
 #include <net/netns/ipv4.h>
 #include <net/netns/ipv6.h>
+#include <net/netns/sctp.h>
 #include <net/netns/dccp.h>
 #include <net/netns/x_tables.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -66,6 +67,7 @@
 	struct hlist_head 	*dev_name_head;
 	struct hlist_head	*dev_index_head;
 	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
+	int			ifindex;
 
 	/* core fib_rules */
 	struct list_head	rules_ops;
@@ -80,6 +82,9 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	struct netns_ipv6	ipv6;
 #endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+	struct netns_sctp	sctp;
+#endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
 	struct netns_dccp	dccp;
 #endif
@@ -104,6 +109,13 @@
 	struct sock		*diag_nlsk;
 };
 
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX	1
 
 #include <linux/seq_file_net.h>
 
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 785f37a..09175d5 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,6 +98,10 @@
  *   nla_put_u16(skb, type, value)	add u16 attribute to skb
  *   nla_put_u32(skb, type, value)	add u32 attribute to skb
  *   nla_put_u64(skb, type, value)	add u64 attribute to skb
+ *   nla_put_s8(skb, type, value)	add s8 attribute to skb
+ *   nla_put_s16(skb, type, value)	add s16 attribute to skb
+ *   nla_put_s32(skb, type, value)	add s32 attribute to skb
+ *   nla_put_s64(skb, type, value)	add s64 attribute to skb
  *   nla_put_string(skb, type, str)	add string attribute to skb
  *   nla_put_flag(skb, type)		add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies)	add msecs attribute to skb
@@ -121,6 +125,10 @@
  *   nla_get_u16(nla)			get payload for a u16 attribute
  *   nla_get_u32(nla)			get payload for a u32 attribute
  *   nla_get_u64(nla)			get payload for a u64 attribute
+ *   nla_get_s8(nla)			get payload for a s8 attribute
+ *   nla_get_s16(nla)			get payload for a s16 attribute
+ *   nla_get_s32(nla)			get payload for a s32 attribute
+ *   nla_get_s64(nla)			get payload for a s64 attribute
  *   nla_get_flag(nla)			return 1 if flag is true
  *   nla_get_msecs(nla)			get payload for a msecs attribute
  *
@@ -160,6 +168,10 @@
 	NLA_NESTED_COMPAT,
 	NLA_NUL_STRING,
 	NLA_BINARY,
+	NLA_S8,
+	NLA_S16,
+	NLA_S32,
+	NLA_S64,
 	__NLA_TYPE_MAX,
 };
 
@@ -183,6 +195,8 @@
  *    NLA_NESTED_COMPAT    Minimum length of structure payload
  *    NLA_U8, NLA_U16,
  *    NLA_U32, NLA_U64,
+ *    NLA_S8, NLA_S16,
+ *    NLA_S32, NLA_S64,
  *    NLA_MSECS            Leaving the length field zero will verify the
  *                         given type fits, using it verifies minimum length
  *                         just like "All other"
@@ -879,6 +893,50 @@
 }
 
 /**
+ * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+	return nla_put(skb, attrtype, sizeof(s8), &value);
+}
+
+/**
+ * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+	return nla_put(skb, attrtype, sizeof(s16), &value);
+}
+
+/**
+ * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+	return nla_put(skb, attrtype, sizeof(s32), &value);
+}
+
+/**
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+{
+	return nla_put(skb, attrtype, sizeof(s64), &value);
+}
+
+/**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -994,6 +1052,46 @@
 }
 
 /**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+	return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+	return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+	return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+	s64 tmp;
+
+	nla_memcpy(&tmp, nla, sizeof(tmp));
+
+	return tmp;
+}
+
+/**
  * nla_get_flag - return payload of flag attribute
  * @nla: flag netlink attribute
  */
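
A short usage sketch for the signed-attribute helpers added above; MY_ATTR_TEMPERATURE is a hypothetical attribute type, and <net/netlink.h> is assumed to be included.

/* Emit a signed 32-bit attribute into a message under construction. */
static int put_temperature(struct sk_buff *skb, s32 celsius)
{
	return nla_put_s32(skb, MY_ATTR_TEMPERATURE, celsius);
}

/* Read it back on the receive side; negative values survive intact. */
static s32 get_temperature(const struct nlattr *nla)
{
	return nla_get_s32(nla);
}
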
diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h
index cb4e894..17ec2b9 100644
--- a/include/net/netns/packet.h
+++ b/include/net/netns/packet.h
@@ -5,10 +5,10 @@
 #define __NETNS_PACKET_H__
 
 #include <linux/rculist.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 struct netns_packet {
-	spinlock_t		sklist_lock;
+	struct mutex		sklist_lock;
 	struct hlist_head	sklist;
 };
 
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644
index 0000000..5e5eb1f
--- /dev/null
+++ b/include/net/netns/sctp.h
@@ -0,0 +1,131 @@
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+	DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+	struct ctl_table_header *sysctl_header;
+#endif
+	/* This is the global socket data structure used for responding to
+	 * the Out-of-the-blue (OOTB) packets.  A control sock will be created
+	 * for this socket at the initialization time.
+	 */
+	struct sock *ctl_sock;
+
+	/* This is the global local address list.
+	 * We actively maintain this complete list of addresses on
+	 * the system by catching address add/delete events.
+	 *
+	 * It is a list of sctp_sockaddr_entry.
+	 */
+	struct list_head local_addr_list;
+	struct list_head addr_waitq;
+	struct timer_list addr_wq_timer;
+	struct list_head auto_asconf_splist;
+	spinlock_t addr_wq_lock;
+
+	/* Lock that protects the local_addr_list writers */
+	spinlock_t local_addr_lock;
+
+	/* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+	 *
+	 * The following protocol parameters are RECOMMENDED:
+	 *
+	 * RTO.Initial		    - 3	 seconds
+	 * RTO.Min		    - 1	 second
+	 * RTO.Max		   -  60 seconds
+	 * RTO.Alpha		    - 1/8  (3 when converted to right shifts.)
+	 * RTO.Beta		    - 1/4  (2 when converted to right shifts.)
+	 */
+	unsigned int rto_initial;
+	unsigned int rto_min;
+	unsigned int rto_max;
+
+	/* Note: rto_alpha and rto_beta are really defined as inverse
+	 * powers of two to facilitate integer operations.
+	 */
+	int rto_alpha;
+	int rto_beta;
+
+	/* Max.Burst		    - 4 */
+	int max_burst;
+
+	/* Whether Cookie Preservative is enabled(1) or not(0) */
+	int cookie_preserve_enable;
+
+	/* Valid.Cookie.Life	    - 60  seconds  */
+	unsigned int valid_cookie_life;
+
+	/* Delayed SACK timeout  200ms default */
+	unsigned int sack_timeout;
+
+	/* HB.interval		    - 30 seconds  */
+	unsigned int hb_interval;
+
+	/* Association.Max.Retrans  - 10 attempts
+	 * Path.Max.Retrans	    - 5	 attempts (per destination address)
+	 * Max.Init.Retransmits	    - 8	 attempts
+	 */
+	int max_retrans_association;
+	int max_retrans_path;
+	int max_retrans_init;
+	/* Potentially-Failed.Max.Retrans sysctl value
+	 * taken from:
+	 * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+	 */
+	int pf_retrans;
+
+	/*
+	 * Policy for performing sctp/socket accounting
+	 * 0   - do socket level accounting, all assocs share sk_sndbuf
+	 * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
+	 */
+	int sndbuf_policy;
+
+	/*
+	 * Policy for performing sctp/socket accounting
+	 * 0   - do socket level accounting, all assocs share sk_rcvbuf
+	 * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
+	 */
+	int rcvbuf_policy;
+
+	int default_auto_asconf;
+
+	/* Flag to indicate if addip is enabled. */
+	int addip_enable;
+	int addip_noauth;
+
+	/* Flag to indicate if PR-SCTP is enabled. */
+	int prsctp_enable;
+
+	/* Flag to indicate if SCTP-AUTH is enabled */
+	int auth_enable;
+
+	/*
+	 * Policy to control SCTP IPv4 address scoping
+	 * 0   - Disable IPv4 address scoping
+	 * 1   - Enable IPv4 address scoping
+	 * 2   - Selectively allow only IPv4 private addresses
+	 * 3   - Selectively allow only IPv4 link local address
+	 */
+	int scope_policy;
+
+	/* Threshold for rwnd update SACKS.  Receive buffer shifted this many
+	 * bits is an indicator of when to send a window update SACK.
+	 */
+	int rwnd_upd_shift;
+
+	/* Threshold for autoclose timeout, in seconds. */
+	unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
diff --git a/include/net/scm.h b/include/net/scm.h
index 079d788..7dc0854 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -70,9 +70,11 @@
 }
 
 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
-			       struct scm_cookie *scm)
+			       struct scm_cookie *scm, bool forcecreds)
 {
 	memset(scm, 0, sizeof(*scm));
+	if (forcecreds)
+		scm_set_cred(scm, task_tgid(current), current_cred());
 	unix_get_peersec_dgram(sock, scm);
 	if (msg->msg_controllen <= 0)
 		return 0;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index ff49964..9c6414f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -114,13 +114,12 @@
 /*
  * sctp/protocol.c
  */
-extern struct sock *sctp_get_ctl_sock(void);
-extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
+extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
 				     sctp_scope_t, gfp_t gfp,
 				     int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int);
+extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -140,12 +139,12 @@
 /*
  * sctp/primitive.c
  */
-int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
-int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
-int sctp_primitive_ABORT(struct sctp_association *, void *arg);
-int sctp_primitive_SEND(struct sctp_association *, void *arg);
-int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
-int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
 
 /*
  * sctp/input.c
@@ -156,7 +155,7 @@
 void sctp_unhash_established(struct sctp_association *);
 void sctp_hash_endpoint(struct sctp_endpoint *);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
-struct sock *sctp_err_lookup(int family, struct sk_buff *,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
 			     struct sctphdr *, struct sctp_association **,
 			     struct sctp_transport **);
 void sctp_err_finish(struct sock *, struct sctp_association *);
@@ -173,14 +172,14 @@
 /*
  * sctp/proc.c
  */
-int sctp_snmp_proc_init(void);
-void sctp_snmp_proc_exit(void);
-int sctp_eps_proc_init(void);
-void sctp_eps_proc_exit(void);
-int sctp_assocs_proc_init(void);
-void sctp_assocs_proc_exit(void);
-int sctp_remaddr_proc_init(void);
-void sctp_remaddr_proc_exit(void);
+int sctp_snmp_proc_init(struct net *net);
+void sctp_snmp_proc_exit(struct net *net);
+int sctp_eps_proc_init(struct net *net);
+void sctp_eps_proc_exit(struct net *net);
+int sctp_assocs_proc_init(struct net *net);
+void sctp_assocs_proc_exit(struct net *net);
+int sctp_remaddr_proc_init(struct net *net);
+void sctp_remaddr_proc_exit(struct net *net);
 
 
 /*
@@ -222,11 +221,10 @@
 #define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
 
 /* SCTP SNMP MIB stats handlers */
-DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
-#define SCTP_INC_STATS(field)      SNMP_INC_STATS(sctp_statistics, field)
-#define SCTP_INC_STATS_BH(field)   SNMP_INC_STATS_BH(sctp_statistics, field)
-#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
-#define SCTP_DEC_STATS(field)      SNMP_DEC_STATS(sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 #endif /* !TEST_FRAME */
 
@@ -361,25 +359,29 @@
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
 {.label= #name, .counter= &sctp_dbg_objcnt_## name}
 
-void sctp_dbg_objcnt_init(void);
-void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(struct net *);
+void sctp_dbg_objcnt_exit(struct net *);
 
 #else
 
 #define SCTP_DBG_OBJCNT_INC(name)
 #define SCTP_DBG_OBJCNT_DEC(name)
 
-static inline void sctp_dbg_objcnt_init(void) { return; }
-static inline void sctp_dbg_objcnt_exit(void) { return; }
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
 
 #endif /* CONFIG_SCTP_DBG_OBJCOUNT */
 
 #if defined CONFIG_SYSCTL
 void sctp_sysctl_register(void);
 void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
 #else
 static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
@@ -586,7 +588,6 @@
 
 extern struct proto sctp_prot;
 extern struct proto sctpv6_prot;
-extern struct proc_dir_entry *proc_net_sctp;
 void sctp_put_port(struct sock *sk);
 
 extern struct idr sctp_assocs_id;
@@ -632,21 +633,21 @@
 
 /* Warning: The following hash functions assume a power of two 'size'. */
 /* This is the hash function for the SCTP port hash table. */
-static inline int sctp_phashfn(__u16 lport)
+static inline int sctp_phashfn(struct net *net, __u16 lport)
 {
-	return lport & (sctp_port_hashsize - 1);
+	return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
 }
 
 /* This is the hash function for the endpoint hash table. */
-static inline int sctp_ep_hashfn(__u16 lport)
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
 {
-	return lport & (sctp_ep_hashsize - 1);
+	return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
 }
 
 /* This is the hash function for the association hash table. */
-static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
 {
-	int h = (lport << 16) + rport;
+	int h = (lport << 16) + rport + net_hash_mix(net);
 	h ^= h>>8;
 	return h & (sctp_assoc_hashsize - 1);
 }
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9148632..b5887e1 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -77,7 +77,8 @@
 	int action;
 } sctp_sm_command_t;
 
-typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
+					      const struct sctp_endpoint *,
 					      const struct sctp_association *,
 					      const sctp_subtype_t type,
 					      void *arg,
@@ -178,7 +179,8 @@
 
 /* Prototypes for utility support functions.  */
 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
+					    sctp_event_t,
 					    sctp_state_t,
 					    sctp_subtype_t);
 int sctp_chunk_iif(const struct sctp_chunk *);
@@ -268,7 +270,7 @@
 
 /* Prototypes for statetable processing. */
 
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
 	       sctp_state_t state,
                struct sctp_endpoint *,
                struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index fc5e600..0fef00f 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -102,6 +102,7 @@
 	unsigned short	fastreuse;
 	struct hlist_node	node;
 	struct hlist_head	owner;
+	struct net	*net;
 };
 
 struct sctp_bind_hashbucket {
@@ -118,69 +119,6 @@
 
 /* The SCTP globals structure. */
 extern struct sctp_globals {
-	/* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
-	 *
-	 * The following protocol parameters are RECOMMENDED:
-	 *
-	 * RTO.Initial		    - 3	 seconds
-	 * RTO.Min		    - 1	 second
-	 * RTO.Max		   -  60 seconds
-	 * RTO.Alpha		    - 1/8  (3 when converted to right shifts.)
-	 * RTO.Beta		    - 1/4  (2 when converted to right shifts.)
-	 */
-	unsigned int rto_initial;
-	unsigned int rto_min;
-	unsigned int rto_max;
-
-	/* Note: rto_alpha and rto_beta are really defined as inverse
-	 * powers of two to facilitate integer operations.
-	 */
-	int rto_alpha;
-	int rto_beta;
-
-	/* Max.Burst		    - 4 */
-	int max_burst;
-
-	/* Whether Cookie Preservative is enabled(1) or not(0) */
-	int cookie_preserve_enable;
-
-	/* Valid.Cookie.Life	    - 60  seconds  */
-	unsigned int valid_cookie_life;
-
-	/* Delayed SACK timeout  200ms default*/
-	unsigned int sack_timeout;
-
-	/* HB.interval		    - 30 seconds  */
-	unsigned int hb_interval;
-
-	/* Association.Max.Retrans  - 10 attempts
-	 * Path.Max.Retrans	    - 5	 attempts (per destination address)
-	 * Max.Init.Retransmits	    - 8	 attempts
-	 */
-	int max_retrans_association;
-	int max_retrans_path;
-	int max_retrans_init;
-
-	/* Potentially-Failed.Max.Retrans sysctl value
-	 * taken from:
-	 * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
-	 */
-	int pf_retrans;
-
-	/*
-	 * Policy for preforming sctp/socket accounting
-	 * 0   - do socket level accounting, all assocs share sk_sndbuf
-	 * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
-	 */
-	int sndbuf_policy;
-
-	/*
-	 * Policy for preforming sctp/socket accounting
-	 * 0   - do socket level accounting, all assocs share sk_rcvbuf
-	 * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
-	 */
-	int rcvbuf_policy;
-
 	/* The following variables are implementation specific.	 */
 
 	/* Default initialization values to be applied to new associations. */
@@ -204,70 +142,11 @@
 	int port_hashsize;
 	struct sctp_bind_hashbucket *port_hashtable;
 
-	/* This is the global local address list.
-	 * We actively maintain this complete list of addresses on
-	 * the system by catching address add/delete events.
-	 *
-	 * It is a list of sctp_sockaddr_entry.
-	 */
-	struct list_head local_addr_list;
-	int default_auto_asconf;
-	struct list_head addr_waitq;
-	struct timer_list addr_wq_timer;
-	struct list_head auto_asconf_splist;
-	spinlock_t addr_wq_lock;
-
-	/* Lock that protects the local_addr_list writers */
-	spinlock_t addr_list_lock;
-	
-	/* Flag to indicate if addip is enabled. */
-	int addip_enable;
-	int addip_noauth_enable;
-
-	/* Flag to indicate if PR-SCTP is enabled. */
-	int prsctp_enable;
-
-	/* Flag to idicate if SCTP-AUTH is enabled */
-	int auth_enable;
-
-	/*
-	 * Policy to control SCTP IPv4 address scoping
-	 * 0   - Disable IPv4 address scoping
-	 * 1   - Enable IPv4 address scoping
-	 * 2   - Selectively allow only IPv4 private addresses
-	 * 3   - Selectively allow only IPv4 link local address
-	 */
-	int ipv4_scope_policy;
-
 	/* Flag to indicate whether computing and verifying checksum
 	 * is disabled. */
         bool checksum_disable;
-
-	/* Threshold for rwnd update SACKS.  Receive buffer shifted this many
-	 * bits is an indicator of when to send and window update SACK.
-	 */
-	int rwnd_update_shift;
-
-	/* Threshold for autoclose timeout, in seconds. */
-	unsigned long max_autoclose;
 } sctp_globals;
 
-#define sctp_rto_initial		(sctp_globals.rto_initial)
-#define sctp_rto_min			(sctp_globals.rto_min)
-#define sctp_rto_max			(sctp_globals.rto_max)
-#define sctp_rto_alpha			(sctp_globals.rto_alpha)
-#define sctp_rto_beta			(sctp_globals.rto_beta)
-#define sctp_max_burst			(sctp_globals.max_burst)
-#define sctp_valid_cookie_life		(sctp_globals.valid_cookie_life)
-#define sctp_cookie_preserve_enable	(sctp_globals.cookie_preserve_enable)
-#define sctp_max_retrans_association	(sctp_globals.max_retrans_association)
-#define sctp_sndbuf_policy	 	(sctp_globals.sndbuf_policy)
-#define sctp_rcvbuf_policy	 	(sctp_globals.rcvbuf_policy)
-#define sctp_max_retrans_path		(sctp_globals.max_retrans_path)
-#define sctp_pf_retrans			(sctp_globals.pf_retrans)
-#define sctp_max_retrans_init		(sctp_globals.max_retrans_init)
-#define sctp_sack_timeout		(sctp_globals.sack_timeout)
-#define sctp_hb_interval		(sctp_globals.hb_interval)
 #define sctp_max_instreams		(sctp_globals.max_instreams)
 #define sctp_max_outstreams		(sctp_globals.max_outstreams)
 #define sctp_address_families		(sctp_globals.address_families)
@@ -277,21 +156,7 @@
 #define sctp_assoc_hashtable		(sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize		(sctp_globals.port_hashsize)
 #define sctp_port_hashtable		(sctp_globals.port_hashtable)
-#define sctp_local_addr_list		(sctp_globals.local_addr_list)
-#define sctp_local_addr_lock		(sctp_globals.addr_list_lock)
-#define sctp_auto_asconf_splist		(sctp_globals.auto_asconf_splist)
-#define sctp_addr_waitq			(sctp_globals.addr_waitq)
-#define sctp_addr_wq_timer		(sctp_globals.addr_wq_timer)
-#define sctp_addr_wq_lock		(sctp_globals.addr_wq_lock)
-#define sctp_default_auto_asconf	(sctp_globals.default_auto_asconf)
-#define sctp_scope_policy		(sctp_globals.ipv4_scope_policy)
-#define sctp_addip_enable		(sctp_globals.addip_enable)
-#define sctp_addip_noauth		(sctp_globals.addip_noauth_enable)
-#define sctp_prsctp_enable		(sctp_globals.prsctp_enable)
-#define sctp_auth_enable		(sctp_globals.auth_enable)
 #define sctp_checksum_disable		(sctp_globals.checksum_disable)
-#define sctp_rwnd_upd_shift		(sctp_globals.rwnd_update_shift)
-#define sctp_max_autoclose		(sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -1085,7 +950,7 @@
 	__u64 hb_nonce;
 };
 
-struct sctp_transport *sctp_transport_new(const union sctp_addr *,
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
 					  gfp_t);
 void sctp_transport_set_owner(struct sctp_transport *,
 			      struct sctp_association *);
@@ -1240,7 +1105,7 @@
 
 void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
 void sctp_bind_addr_free(struct sctp_bind_addr *);
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
 			const struct sctp_bind_addr *src,
 			sctp_scope_t scope, gfp_t gfp,
 			int flags);
@@ -1267,7 +1132,7 @@
 			   __u16 port, gfp_t gfp);
 
 sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);
 int sctp_is_ep_boundall(struct sock *sk);
@@ -1425,13 +1290,13 @@
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
 				const union sctp_addr *);
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
-					const union sctp_addr *);
-int sctp_has_association(const union sctp_addr *laddr,
+					struct net *, const union sctp_addr *);
+int sctp_has_association(struct net *net, const union sctp_addr *laddr,
 			 const union sctp_addr *paddr);
 
-int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
-		     sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
-		     struct sctp_chunk **err_chunk);
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+		     sctp_cid_t, sctp_init_chunk_t *peer_init,
+		     struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
 		      const union sctp_addr *peer,
 		      sctp_init_chunk_t *init, gfp_t gfp);
@@ -2013,6 +1878,7 @@
 				  sctp_transport_cmd_t, sctp_sn_error_t);
 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+					   struct net *,
 					   const union sctp_addr *,
 					   const union sctp_addr *);
 void sctp_assoc_migrate(struct sctp_association *, struct sock *);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 0147b90..7159626 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -154,13 +154,15 @@
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		this_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
-		this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
+		__typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;	\
+		this_cpu_inc(ptr[basefield##PKTS]);		\
+		this_cpu_add(ptr[basefield##OCTETS], addend);	\
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
-		__this_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
-		__this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
+		__typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;	\
+		__this_cpu_inc(ptr[basefield##PKTS]);		\
+		__this_cpu_add(ptr[basefield##OCTETS], addend);	\
 	} while (0)
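
The macros above now evaluate mib[0]->mibs once into a __typeof__-derived local pointer instead of spelling out the expression for each counter. A user-space sketch of that single-evaluation pattern (without the per-CPU accessors the kernel version uses):

#include <stdio.h>

struct mib { unsigned long mibs[4]; };

#define UPD_PO_STATS(mib, base, addend)					\
	do {								\
		__typeof__(*(mib)->mibs) *ptr = (mib)->mibs;		\
		ptr[(base) + 0] += 1;		/* ...PKTS counter   */	\
		ptr[(base) + 1] += (addend);	/* ...OCTETS counter */	\
	} while (0)

int main(void)
{
	struct mib m = { { 0 } };

	UPD_PO_STATS(&m, 0, 1500);
	printf("pkts=%lu octets=%lu\n", m.mibs[0], m.mibs[1]);
	return 0;
}
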
 
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 9d43736..84bdaec 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,6 +218,7 @@
   *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
   *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
   *	@sk_gso_max_size: Maximum GSO segment size to build
+  *	@sk_gso_max_segs: Maximum number of GSO segments
   *	@sk_lingertime: %SO_LINGER l_linger setting
   *	@sk_backlog: always used with the per-socket spinlock held
   *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -338,6 +339,7 @@
 	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
+	u16			sk_gso_max_segs;
 	int			sk_rcvlowat;
 	unsigned long	        sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 91e7467..9a0021d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,6 +464,7 @@
 void tcp_connect_init(struct sock *sk);
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d9509eb..36ad56b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -213,6 +213,9 @@
 	struct xfrm_lifetime_cur curlft;
 	struct tasklet_hrtimer	mtimer;
 
+	/* used to fix curlft->add_time when changing date */
+	long		saved_tmo;
+
 	/* Last used time */
 	unsigned long		lastused;
 
@@ -238,6 +241,7 @@
 
 /* xflags - make enum if more show up */
 #define XFRM_TIME_DEFER	1
+#define XFRM_SOFT_EXPIRE 2
 
 enum {
 	XFRM_STATE_VOID,
@@ -288,6 +292,8 @@
 						  struct flowi *fl,
 						  int reverse);
 	int			(*get_tos)(const struct flowi *fl);
+	void			(*init_dst)(struct net *net,
+					    struct xfrm_dst *dst);
 	int			(*init_path)(struct xfrm_dst *path,
 					     struct dst_entry *dst,
 					     int nfheader_len);
@@ -567,7 +573,7 @@
 	struct list_head	list;
 	char			*id;
 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
-	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c75c0d1..cdca2ab 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1075,7 +1075,8 @@
 const char *snd_pcm_format_name(snd_pcm_format_t format);
 
 /**
- * Get a string naming the direction of a stream
+ * snd_pcm_stream_str - Get a string naming the direction of a stream
+ * @substream: the pcm substream instance
  */
 static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
 {
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ea7a203..5a8671e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -73,6 +73,9 @@
 		__entry->prio		= p->prio;
 		__entry->success	= success;
 		__entry->target_cpu	= task_cpu(p);
+	)
+	TP_perf_assign(
+		__perf_task(p);
 	),
 
 	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -325,6 +328,7 @@
 	)
 	TP_perf_assign(
 		__perf_count(delay);
+		__perf_task(tsk);
 	),
 
 	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6bc2fa..a763888 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -712,6 +712,9 @@
 #undef __perf_count
 #define __perf_count(c) __count = (c)
 
+#undef __perf_task
+#define __perf_task(t) __task = (t)
+
 #undef TP_perf_assign
 #define TP_perf_assign(args...) args
 
@@ -725,6 +728,7 @@
 	struct ftrace_raw_##call *entry;				\
 	struct pt_regs __regs;						\
 	u64 __addr = 0, __count = 1;					\
+	struct task_struct *__task = NULL;				\
 	struct hlist_head *head;					\
 	int __entry_size;						\
 	int __data_size;						\
@@ -752,7 +756,7 @@
 									\
 	head = this_cpu_ptr(event_call->perf_events);			\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-		__count, &__regs, head);				\
+		__count, &__regs, head, __task);			\
 }
 
 /*
diff --git a/init/main.c b/init/main.c
index e60679d..b286730 100644
--- a/init/main.c
+++ b/init/main.c
@@ -461,10 +461,6 @@
 	percpu_init_late();
 	pgtable_cache_init();
 	vmalloc_init();
-#ifdef CONFIG_X86
-	if (efi_enabled)
-		efi_enter_virtual_mode();
-#endif
 }
 
 asmlinkage void __init start_kernel(void)
@@ -606,6 +602,10 @@
 	calibrate_delay();
 	pidmap_init();
 	anon_vma_init();
+#ifdef CONFIG_X86
+	if (efi_enabled)
+		efi_enter_virtual_mode();
+#endif
 	thread_info_cache_init();
 	cred_init();
 	fork_init(totalram_pages);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3a5ca58..ed206fd 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -250,7 +250,6 @@
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
 		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
 		goto out;
 	}
 
@@ -259,7 +258,7 @@
 
 	fsnotify_duplicate_mark(&new->mark, entry);
 	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
-		free_chunk(new);
+		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
 
@@ -293,7 +292,7 @@
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
 	fsnotify_destroy_mark(entry);
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
 
 Fallback:
@@ -322,7 +321,7 @@
 
 	entry = &chunk->mark;
 	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
-		free_chunk(chunk);
+		fsnotify_put_mark(entry);
 		return -ENOSPC;
 	}
 
@@ -347,6 +346,7 @@
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	fsnotify_put_mark(entry);	/* drop initial reference */
 	return 0;
 }
 
@@ -396,7 +396,7 @@
 	fsnotify_duplicate_mark(chunk_entry, old_entry);
 	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
-		free_chunk(chunk);
+		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
@@ -444,8 +444,8 @@
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
 	fsnotify_destroy_mark(old_entry);
+	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
-	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
@@ -916,7 +916,12 @@
 	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 
 	evict_chunk(chunk);
-	fsnotify_put_mark(entry);
+
+	/*
+	 * We are guaranteed to have at least one reference to the mark from
+	 * either the inode or the caller of fsnotify_destroy_mark().
+	 */
+	BUG_ON(atomic_read(&entry->refcnt) < 1);
 }
 
 static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8b68ce7..be7b33b 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -12,6 +12,7 @@
 #include <linux/kdb.h>
 #include <linux/kdebug.h>
 #include <linux/export.h>
+#include <linux/hardirq.h>
 #include "kdb_private.h"
 #include "../debug_core.h"
 
@@ -52,6 +53,9 @@
 	if (atomic_read(&kgdb_setting_breakpoint))
 		reason = KDB_REASON_KEYBOARD;
 
+	if (in_nmi())
+		reason = KDB_REASON_NMI;
+
 	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
 		if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
 			reason = KDB_REASON_BREAK;
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index bb9520f..0a69d2a 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -715,9 +715,6 @@
 	/* check for having reached the LINES number of printed lines */
 	if (kdb_nextline == linecount) {
 		char buf1[16] = "";
-#if defined(CONFIG_SMP)
-		char buf2[32];
-#endif
 
 		/* Watch out for recursion here.  Any routine that calls
 		 * kdb_printf will come back through here.  And kdb_read
@@ -732,14 +729,6 @@
 		if (moreprompt == NULL)
 			moreprompt = "more> ";
 
-#if defined(CONFIG_SMP)
-		if (strchr(moreprompt, '%')) {
-			sprintf(buf2, moreprompt, get_cpu());
-			put_cpu();
-			moreprompt = buf2;
-		}
-#endif
-
 		kdb_input_flush();
 		c = console_drivers;
 
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 1f91413..31df170 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -139,11 +139,10 @@
 static char *__env[] = {
 #if defined(CONFIG_SMP)
  "PROMPT=[%d]kdb> ",
- "MOREPROMPT=[%d]more> ",
 #else
  "PROMPT=kdb> ",
- "MOREPROMPT=more> ",
 #endif
+ "MOREPROMPT=more> ",
  "RADIX=16",
  "MDCOUNT=8",			/* lines of md output */
  KDB_PLATFORM_ENV,
@@ -1236,18 +1235,6 @@
 		*cmdbuf = '\0';
 		*(cmd_hist[cmd_head]) = '\0';
 
-		if (KDB_FLAG(ONLY_DO_DUMP)) {
-			/* kdb is off but a catastrophic error requires a dump.
-			 * Take the dump and reboot.
-			 * Turn on logging so the kdb output appears in the log
-			 * buffer in the dump.
-			 */
-			const char *setargs[] = { "set", "LOGGING", "1" };
-			kdb_set(2, setargs);
-			kdb_reboot(0, NULL);
-			/*NOTREACHED*/
-		}
-
 do_full_getstr:
 #if defined(CONFIG_SMP)
 		snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 6581a04..98d4597 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -153,7 +153,8 @@
 	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
 }
 
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs)
 {
 	int rctx;
 	struct perf_callchain_entry *entry;
@@ -178,6 +179,12 @@
 	}
 
 	if (regs) {
+		/*
+		 * Disallow cross-task user callchains.
+		 */
+		if (event->ctx->task && event->ctx->task != current)
+			goto exit_put;
+
 		perf_callchain_store(entry, PERF_CONTEXT_USER);
 		perf_callchain_user(entry, regs);
 	}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f1cf0ed..b7935fc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4039,7 +4039,7 @@
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		int size = 1;
 
-		data->callchain = perf_callchain(regs);
+		data->callchain = perf_callchain(event, regs);
 
 		if (data->callchain)
 			size += data->callchain->nr;
@@ -5209,7 +5209,8 @@
 }
 
 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
-		   struct pt_regs *regs, struct hlist_head *head, int rctx)
+		   struct pt_regs *regs, struct hlist_head *head, int rctx,
+		   struct task_struct *task)
 {
 	struct perf_sample_data data;
 	struct perf_event *event;
@@ -5228,6 +5229,31 @@
 			perf_swevent_event(event, count, &data, regs);
 	}
 
+	/*
+	 * If a target task was specified, also iterate its context and
+	 * deliver this event there too.
+	 */
+	if (task && task != current) {
+		struct perf_event_context *ctx;
+		struct trace_entry *entry = record;
+
+		rcu_read_lock();
+		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
+		if (!ctx)
+			goto unlock;
+
+		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+			if (event->attr.type != PERF_TYPE_TRACEPOINT)
+				continue;
+			if (event->attr.config != entry->type)
+				continue;
+			if (perf_tp_event_match(event, &data, regs))
+				perf_swevent_event(event, count, &data, regs);
+		}
+unlock:
+		rcu_read_unlock();
+	}
+
 	perf_swevent_put_recursion_context(rctx);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f..a096c19 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -101,7 +101,8 @@
 }
 
 /* Callchain handling */
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
diff --git a/kernel/futex.c b/kernel/futex.c
index e2b0fb9..3717e7b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2231,11 +2231,11 @@
  * @uaddr2:	the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
  * via the following:
@@ -2272,6 +2272,9 @@
 	struct futex_q q = futex_q_init;
 	int res, ret;
 
+	if (uaddr == uaddr2)
+		return -EINVAL;
+
 	if (!bitset)
 		return -EINVAL;
 
@@ -2343,7 +2346,7 @@
 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
 		 * the pi_state.
 		 */
-		WARN_ON(!&q.pi_state);
+		WARN_ON(!q.pi_state);
 		pi_mutex = &q.pi_state->pi_mutex;
 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
 		debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@
 	 * fault, unlock the rt_mutex and return the fault to userspace.
 	 */
 	if (ret == -EFAULT) {
-		if (rt_mutex_owner(pi_mutex) == current)
+		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
 			rt_mutex_unlock(pi_mutex);
 	} else if (ret == -EINTR) {
 		/*
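The hunk above also replaces WARN_ON(!&q.pi_state) with WARN_ON(!q.pi_state): the address of a structure member can never be NULL, so the old check could not fire. A minimal user-space sketch of the difference, not taken from the kernel (struct futex_q_demo and its field are invented):

#include <stdio.h>

struct futex_q_demo {
	void *pi_state;			/* stands in for struct futex_pi_state * */
};

int main(void)
{
	struct futex_q_demo q = { .pi_state = NULL };
	void **member_addr = &q.pi_state;

	/* The address of a member is never NULL: this always prints 0. */
	printf("!member_addr = %d\n", !member_addr);

	/* Testing the pointer value itself is what the fixed check wants:
	 * prints 1 here because pi_state really is NULL. */
	printf("!q.pi_state  = %d\n", !q.pi_state);

	return 0;
}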
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a8e8f0..4c69326 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -944,6 +944,18 @@
 	}
 
 	/*
+	 * Drivers are often written to work w/o knowledge about the
+	 * underlying irq chip implementation, so a request for a
+	 * threaded irq without a primary hard irq context handler
+	 * requires the ONESHOT flag to be set. Some irq chips like
+	 * MSI based interrupts are per se one shot safe. Check the
+	 * chip flags, so we can avoid the unmask dance at the end of
+	 * the threaded handler for those.
+	 */
+	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
+		new->flags &= ~IRQF_ONESHOT;
+
+	/*
 	 * The following block of code has to be executed atomically
 	 */
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1017,7 +1029,8 @@
 		 */
 		new->thread_mask = 1 << ffz(thread_mask);
 
-	} else if (new->handler == irq_default_primary_handler) {
+	} else if (new->handler == irq_default_primary_handler &&
+		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
 		/*
 		 * The interrupt was requested with handler = NULL, so
 		 * we use the default primary handler for it. But it
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1da39ea..c8b7446 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -178,9 +178,6 @@
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());
 
-	/* Kick the lockup detector */
-	lockup_detector_bootcpu_resume();
-
  Enable_cpus:
 	enable_nonboot_cpus();
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 6a76ab9..66a2ea3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1034,6 +1034,7 @@
 			struct log *msg = log_from_idx(idx);
 
 			len += msg_print_text(msg, prev, true, NULL, 0);
+			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
 		}
@@ -1046,6 +1047,7 @@
 			struct log *msg = log_from_idx(idx);
 
 			len -= msg_print_text(msg, prev, true, NULL, 0);
+			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
 		}
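The two printk hunks above both add prev = msg->flags inside a loop that sizes log records; without it, every record is measured against the stale initial value of prev. Setting the exact msg_print_text() semantics aside, a toy version of the pattern (record layout, flag meaning and the 8-byte prefix are all invented):

#include <stdbool.h>
#include <stdio.h>

/* Toy log record: a text length plus a flag saying the record continues
 * the previous line (loosely modelled on continuation flags). */
struct rec {
	int text_len;
	bool cont;
};

/* A record only gets a prefix when it starts a fresh line, which depends
 * on the flags of the *previous* record. */
static int rec_len(const struct rec *r, bool prev_cont)
{
	int prefix = prev_cont ? 0 : 8;		/* invented 8-byte prefix */
	return prefix + r->text_len;
}

int main(void)
{
	struct rec log[] = {
		{ .text_len = 10, .cont = true  },
		{ .text_len = 5,  .cont = true  },
		{ .text_len = 7,  .cont = false },
	};
	bool prev = false;
	int len = 0;

	for (unsigned int i = 0; i < sizeof(log) / sizeof(log[0]); i++) {
		len += rec_len(&log[i], prev);
		prev = log[i].cont;	/* the update the fix adds; dropping it
					 * gives 46 instead of 30 below */
	}
	printf("total length = %d\n", len);
	return 0;
}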
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d325c4b..fbf1fd0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3142,6 +3142,20 @@
 # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
 #endif
 
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+	u64 temp = (__force u64) rtime;
+
+	temp *= (__force u64) utime;
+
+	if (sizeof(cputime_t) == 4)
+		temp = div_u64(temp, (__force u32) total);
+	else
+		temp = div64_u64(temp, (__force u64) total);
+
+	return (__force cputime_t) temp;
+}
+
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@
 	 */
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(utime, rtime, total);
+	else
 		utime = rtime;
 
 	/*
@@ -3184,13 +3194,9 @@
 	total = cputime.utime + cputime.stime;
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) cputime.utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(cputime.utime, rtime, total);
+	else
 		utime = rtime;
 
 	sig->prev_utime = max(sig->prev_utime, utime);
@@ -4340,9 +4346,7 @@
 	 */
 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return 0;
 	}
 
@@ -7248,6 +7252,7 @@
 
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group root_task_group;
+LIST_HEAD(task_groups);
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
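scale_utime() above keeps the whole utime * rtime / total computation in 64 bits and only falls back to a 32-bit divisor when cputime_t itself is 4 bytes; the code it replaces always truncated total to u32 for do_div(). A stand-alone sketch of why that matters (the function name and the numbers are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Scale utime so that scaled / rtime == utime / total. */
static uint64_t scale_demo(uint64_t utime, uint64_t rtime, uint64_t total)
{
	/* rtime * utime may still overflow 64 bits for extreme inputs;
	 * like the kernel helper, this sketch accepts that. */
	return rtime * utime / total;
}

int main(void)
{
	/* A total wider than 32 bits: truncating the divisor to u32, the
	 * way a 32-bit do_div-style helper would, divides by 705032704
	 * instead of 5000000000. */
	uint64_t utime = 3000000000ULL;
	uint64_t total = 5000000000ULL;
	uint64_t rtime = 6000000000ULL;

	printf("64-bit divisor:        %llu\n",
	       (unsigned long long)scale_demo(utime, rtime, total));
	printf("truncated u32 divisor: %llu\n",
	       (unsigned long long)(rtime * utime / (uint32_t)total));
	return 0;
}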
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586f..23aa789 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
 {
-	int                  idx      = 0;
-	int                  task_pri = convert_prio(p->prio);
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
 
 	if (task_pri >= MAX_RT_PRIO)
 		return 0;
@@ -137,9 +137,9 @@
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-	int                 *currpri = &cp->cpu_to_pri[cpu];
-	int                  oldpri  = *currpri;
-	int                  do_mb = 0;
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	int do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db..c219bf8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
@@ -3384,6 +3387,14 @@
 
 static void update_h_load(long cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long now = jiffies;
+
+	if (rq->h_load_throttle == now)
+		return;
+
+	rq->h_load_throttle = now;
+
 	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 	rcu_read_unlock();
@@ -3653,8 +3664,7 @@
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3681,7 @@
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3810,7 @@
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-				      const struct cpumask *cpus,
-				      int *balance, struct sd_lb_stats *sds)
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3827,7 @@
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4063,6 @@
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4072,7 @@
  *		   put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4082,7 @@
 	 * Compute the various statistics relevant for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4162,7 @@
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4177,7 @@
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4258,7 @@
 		.dst_grpmask    = sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4267,7 @@
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4277,7 @@
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -4294,11 +4301,10 @@
 		env.src_rq    = busiest;
 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
+		update_h_load(env.src_cpu);
 more_balance:
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
-		if (!env.loop)
-			update_h_load(env.src_cpu);
 
 		/*
 		 * cur_ld_moved - load moved in current iteration
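update_h_load() above is now rate-limited to once per jiffy per runqueue via rq->h_load_throttle instead of being re-run for every balance iteration. A stand-alone sketch of the throttle pattern (the _demo names and the hand-advanced tick counter are invented):

#include <stdio.h>

static unsigned long jiffies_demo = 1;		/* stands in for jiffies; start
						 * non-zero so the first call
						 * is not wrongly throttled */
static unsigned long h_load_throttle_demo;	/* last tick we recomputed on */

static void expensive_recompute(void)
{
	printf("recomputing at tick %lu\n", jiffies_demo);
}

static void update_h_load_demo(void)
{
	unsigned long now = jiffies_demo;

	/* At most one recompute per tick, however often we are called. */
	if (h_load_throttle_demo == now)
		return;
	h_load_throttle_demo = now;

	expensive_recompute();
}

int main(void)
{
	for (int call = 0; call < 6; call++) {
		update_h_load_demo();
		if (call % 2)		/* advance the "clock" every other call */
			jiffies_demo++;
	}
	return 0;
}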
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca..944cb68 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -788,6 +788,19 @@
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled.  If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway.  Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c35a1a7..f6714d0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -80,7 +80,7 @@
 struct cfs_rq;
 struct rt_rq;
 
-static LIST_HEAD(task_groups);
+extern struct list_head task_groups;
 
 struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-#endif
+#ifdef CONFIG_SMP
+	unsigned long h_load_throttle;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct list_head leaf_rt_rq_list;
 #endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7b386e8..da5eb5b 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,8 +27,10 @@
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->on_rq)
+	if (stop && stop->on_rq) {
+		stop->se.exec_start = rq->clock_task;
 		return stop;
+	}
 
 	return NULL;
 }
@@ -52,6 +54,21 @@
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
+	struct task_struct *curr = rq->curr;
+	u64 delta_exec;
+
+	delta_exec = rq->clock_task - curr->se.exec_start;
+	if (unlikely((s64)delta_exec < 0))
+		delta_exec = 0;
+
+	schedstat_set(curr->se.statistics.exec_max,
+			max(curr->se.statistics.exec_max, delta_exec));
+
+	curr->se.sum_exec_runtime += delta_exec;
+	account_group_exec_runtime(curr, delta_exec);
+
+	curr->se.exec_start = rq->clock_task;
+	cpuacct_charge(curr, delta_exec);
 }
 
 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@
 
 static void set_curr_task_stop(struct rq *rq)
 {
+	struct task_struct *stop = rq->stop;
+
+	stop->se.exec_start = rq->clock_task;
 }
 
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
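put_prev_task_stop() above starts charging runtime to the stop task: the delta between the runqueue clock and se.exec_start is clamped at zero, accumulated, and exec_start is reset for the next slice. A small stand-alone version of that accounting step (struct demo_task and the clock values are illustrative):

#include <stdint.h>
#include <stdio.h>

struct demo_task {
	uint64_t exec_start;		/* clock value when this slice began */
	uint64_t sum_exec_runtime;	/* accumulated runtime */
};

static void account_runtime(struct demo_task *t, uint64_t clock_now)
{
	int64_t delta = (int64_t)(clock_now - t->exec_start);

	/* The clock can appear to go backwards (e.g. when read on another
	 * CPU); never charge a negative slice. */
	if (delta < 0)
		delta = 0;

	t->sum_exec_runtime += (uint64_t)delta;
	t->exec_start = clock_now;	/* next slice starts here */
}

int main(void)
{
	struct demo_task t = { .exec_start = 1000, .sum_exec_runtime = 0 };

	account_runtime(&t, 1500);	/* +500 */
	account_runtime(&t, 1400);	/* "backwards" clock: +0 */
	account_runtime(&t, 2000);	/* +600 */

	printf("sum_exec_runtime = %llu\n",
	       (unsigned long long)t.sum_exec_runtime);	/* 1100 */
	return 0;
}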
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 91d4e17..d320d44 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -75,6 +75,7 @@
 			p = q->next;
 			q->func(q);
 			q = p;
+			cond_resched();
 		}
 	}
 }
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154..46da053 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -37,7 +37,7 @@
  * requested HZ value. It is also not recommended
  * for "tick-less" systems.
  */
-#define NSEC_PER_JIFFY	((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
+#define NSEC_PER_JIFFY	((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ))
 
 /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
  * conversion, the .shift value could be zero. However
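NSEC_PER_JIFFY above is a fixed-point division: NSEC_PER_SEC is shifted left by 8 bits and divided by SHIFTED_HZ, i.e. roughly the tick rate carried with an 8-bit fractional part, so a tick rate that is not exactly HZ still yields an accurate jiffy length. A quick user-space check of the arithmetic (the +13 fraction is invented; the real SHIFTED_HZ comes from the hardware timer rounding):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	/* Exact 1000 Hz: the 8-bit shift cancels and we get 1000000 ns. */
	uint64_t shifted_hz_exact = 1000ULL << 8;
	/* A tick rate a hair above 1000 Hz, expressed with an 8-bit
	 * fractional part (the 13 is purely illustrative). */
	uint64_t shifted_hz_real = (1000ULL << 8) + 13;

	printf("exact 1000 Hz:    NSEC_PER_JIFFY = %llu\n",
	       (unsigned long long)((NSEC_PER_SEC << 8) / shifted_hz_exact));
	printf("slightly fast HZ: NSEC_PER_JIFFY = %llu\n",
	       (unsigned long long)((NSEC_PER_SEC << 8) / shifted_hz_real));
	return 0;
}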
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b7fbadc..24174b4 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -28,7 +28,7 @@
 /* USER_HZ period (usecs): */
 unsigned long			tick_usec = TICK_USEC;
 
-/* ACTHZ period (nsecs): */
+/* SHIFTED_HZ period (nsecs): */
 unsigned long			tick_nsec;
 
 static u64			tick_length;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f045cc5..e16af19 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -65,14 +65,14 @@
 	 * used instead.
 	 */
 	struct timespec		wall_to_monotonic;
-	/* time spent in suspend */
-	struct timespec		total_sleep_time;
-	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
-	struct timespec		raw_time;
 	/* Offset clock monotonic -> clock realtime */
 	ktime_t			offs_real;
+	/* time spent in suspend */
+	struct timespec		total_sleep_time;
 	/* Offset clock monotonic -> clock boottime */
 	ktime_t			offs_boot;
+	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+	struct timespec		raw_time;
 	/* Seqlock for all timekeeper values */
 	seqlock_t		lock;
 };
@@ -108,13 +108,38 @@
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
 {
 	tk->xtime_sec = ts->tv_sec;
-	tk->xtime_nsec = ts->tv_nsec << tk->shift;
+	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
 {
 	tk->xtime_sec += ts->tv_sec;
-	tk->xtime_nsec += ts->tv_nsec << tk->shift;
+	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+}
+
+static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
+{
+	struct timespec tmp;
+
+	/*
+	 * Verify consistency of: offset_real = -wall_to_monotonic
+	 * before modifying anything
+	 */
+	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
+					-tk->wall_to_monotonic.tv_nsec);
+	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
+	tk->wall_to_monotonic = wtm;
+	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
+	tk->offs_real = timespec_to_ktime(tmp);
+}
+
+static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
+{
+	/* Verify consistency before modifying */
+	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
+
+	tk->total_sleep_time	= t;
+	tk->offs_boot		= timespec_to_ktime(t);
 }
 
 /**
@@ -217,14 +242,6 @@
 	return nsec + arch_gettimeoffset();
 }
 
-static void update_rt_offset(struct timekeeper *tk)
-{
-	struct timespec tmp, *wtm = &tk->wall_to_monotonic;
-
-	set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
-	tk->offs_real = timespec_to_ktime(tmp);
-}
-
 /* must hold write on timekeeper.lock */
 static void timekeeping_update(struct timekeeper *tk, bool clearntp)
 {
@@ -234,12 +251,10 @@
 		tk->ntp_error = 0;
 		ntp_clear();
 	}
-	update_rt_offset(tk);
 	xt = tk_xtime(tk);
 	update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
 }
 
-
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -277,18 +292,19 @@
  */
 void getnstimeofday(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	s64 nsecs = 0;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
+		ts->tv_sec = tk->xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(tk);
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
@@ -296,19 +312,18 @@
 
 ktime_t ktime_get(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned int seq;
 	s64 secs, nsecs;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		secs = timekeeper.xtime_sec +
-				timekeeper.wall_to_monotonic.tv_sec;
-		nsecs = timekeeping_get_ns(&timekeeper) +
-				timekeeper.wall_to_monotonic.tv_nsec;
+		seq = read_seqbegin(&tk->lock);
+		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 	/*
 	 * Use ktime_set/ktime_add_ns to create a proper ktime on
 	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -327,18 +342,19 @@
  */
 void ktime_get_ts(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
-		tomono = timekeeper.wall_to_monotonic;
+		seq = read_seqbegin(&tk->lock);
+		ts->tv_sec = tk->xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(tk);
+		tomono = tk->wall_to_monotonic;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
 				ts->tv_nsec + tomono.tv_nsec);
@@ -358,22 +374,23 @@
  */
 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	s64 nsecs_raw, nsecs_real;
 
 	WARN_ON_ONCE(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		*ts_raw = timekeeper.raw_time;
-		ts_real->tv_sec = timekeeper.xtime_sec;
+		*ts_raw = tk->raw_time;
+		ts_real->tv_sec = tk->xtime_sec;
 		ts_real->tv_nsec = 0;
 
-		nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
-		nsecs_real = timekeeping_get_ns(&timekeeper);
+		nsecs_raw = timekeeping_get_ns_raw(tk);
+		nsecs_real = timekeeping_get_ns(tk);
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	timespec_add_ns(ts_raw, nsecs_raw);
 	timespec_add_ns(ts_real, nsecs_real);
@@ -406,28 +423,28 @@
  */
 int do_settimeofday(const struct timespec *tv)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec ts_delta, xt;
 	unsigned long flags;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
-	timekeeping_forward_now(&timekeeper);
+	timekeeping_forward_now(tk);
 
-	xt = tk_xtime(&timekeeper);
+	xt = tk_xtime(tk);
 	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
 	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
 
-	timekeeper.wall_to_monotonic =
-			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
+	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
 
-	tk_set_xtime(&timekeeper, tv);
+	tk_set_xtime(tk, tv);
 
-	timekeeping_update(&timekeeper, true);
+	timekeeping_update(tk, true);
 
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -436,7 +453,6 @@
 }
 EXPORT_SYMBOL(do_settimeofday);
 
-
 /**
  * timekeeping_inject_offset - Adds or subtracts from the current time.
  * @tv:		pointer to the timespec variable containing the offset
@@ -445,23 +461,23 @@
  */
 int timekeeping_inject_offset(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
 
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
-	timekeeping_forward_now(&timekeeper);
+	timekeeping_forward_now(tk);
 
 
-	tk_xtime_add(&timekeeper, ts);
-	timekeeper.wall_to_monotonic =
-				timespec_sub(timekeeper.wall_to_monotonic, *ts);
+	tk_xtime_add(tk, ts);
+	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
 
-	timekeeping_update(&timekeeper, true);
+	timekeeping_update(tk, true);
 
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -477,23 +493,24 @@
  */
 static int change_clocksource(void *data)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct clocksource *new, *old;
 	unsigned long flags;
 
 	new = (struct clocksource *) data;
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
-	timekeeping_forward_now(&timekeeper);
+	timekeeping_forward_now(tk);
 	if (!new->enable || new->enable(new) == 0) {
-		old = timekeeper.clock;
-		tk_setup_internals(&timekeeper, new);
+		old = tk->clock;
+		tk_setup_internals(tk, new);
 		if (old->disable)
 			old->disable(old);
 	}
-	timekeeping_update(&timekeeper, true);
+	timekeeping_update(tk, true);
 
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	return 0;
 }
@@ -507,7 +524,9 @@
  */
 void timekeeping_notify(struct clocksource *clock)
 {
-	if (timekeeper.clock == clock)
+	struct timekeeper *tk = &timekeeper;
+
+	if (tk->clock == clock)
 		return;
 	stop_machine(change_clocksource, clock, NULL);
 	tick_clock_notify();
@@ -536,35 +555,36 @@
  */
 void getrawmonotonic(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	s64 nsecs;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		nsecs = timekeeping_get_ns_raw(&timekeeper);
-		*ts = timekeeper.raw_time;
+		seq = read_seqbegin(&tk->lock);
+		nsecs = timekeeping_get_ns_raw(tk);
+		*ts = tk->raw_time;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getrawmonotonic);
 
-
 /**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
 int timekeeping_valid_for_hres(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	int ret;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	return ret;
 }
@@ -574,15 +594,16 @@
  */
 u64 timekeeping_max_deferment(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	u64 ret;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		ret = timekeeper.clock->max_idle_ns;
+		ret = tk->clock->max_idle_ns;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	return ret;
 }
@@ -622,46 +643,43 @@
  */
 void __init timekeeping_init(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct clocksource *clock;
 	unsigned long flags;
-	struct timespec now, boot;
+	struct timespec now, boot, tmp;
 
 	read_persistent_clock(&now);
 	read_boot_clock(&boot);
 
-	seqlock_init(&timekeeper.lock);
+	seqlock_init(&tk->lock);
 
 	ntp_init();
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
-	tk_setup_internals(&timekeeper, clock);
+	tk_setup_internals(tk, clock);
 
-	tk_set_xtime(&timekeeper, &now);
-	timekeeper.raw_time.tv_sec = 0;
-	timekeeper.raw_time.tv_nsec = 0;
+	tk_set_xtime(tk, &now);
+	tk->raw_time.tv_sec = 0;
+	tk->raw_time.tv_nsec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
-		boot = tk_xtime(&timekeeper);
+		boot = tk_xtime(tk);
 
-	set_normalized_timespec(&timekeeper.wall_to_monotonic,
-				-boot.tv_sec, -boot.tv_nsec);
-	update_rt_offset(&timekeeper);
-	timekeeper.total_sleep_time.tv_sec = 0;
-	timekeeper.total_sleep_time.tv_nsec = 0;
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
+	tk_set_wall_to_mono(tk, tmp);
+
+	tmp.tv_sec = 0;
+	tmp.tv_nsec = 0;
+	tk_set_sleep_time(tk, tmp);
+
+	write_sequnlock_irqrestore(&tk->lock, flags);
 }
 
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
-static void update_sleep_time(struct timespec t)
-{
-	timekeeper.total_sleep_time = t;
-	timekeeper.offs_boot = timespec_to_ktime(t);
-}
-
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -677,13 +695,11 @@
 					"sleep delta value!\n");
 		return;
 	}
-
 	tk_xtime_add(tk, delta);
-	tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
-	update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
+	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
+	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
 }
 
-
 /**
  * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
  * @delta: pointer to a timespec delta value
@@ -696,6 +712,7 @@
  */
 void timekeeping_inject_sleeptime(struct timespec *delta)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
 	struct timespec ts;
 
@@ -704,21 +721,20 @@
 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
 		return;
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
-	timekeeping_forward_now(&timekeeper);
+	timekeeping_forward_now(tk);
 
-	__timekeeping_inject_sleeptime(&timekeeper, delta);
+	__timekeeping_inject_sleeptime(tk, delta);
 
-	timekeeping_update(&timekeeper, true);
+	timekeeping_update(tk, true);
 
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
 }
 
-
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
  *
@@ -728,6 +744,7 @@
  */
 static void timekeeping_resume(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
 	struct timespec ts;
 
@@ -735,18 +752,18 @@
 
 	clocksource_resume();
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
-		__timekeeping_inject_sleeptime(&timekeeper, &ts);
+		__timekeeping_inject_sleeptime(tk, &ts);
 	}
 	/* re-base the last cycle value */
-	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
-	timekeeper.ntp_error = 0;
+	tk->clock->cycle_last = tk->clock->read(tk->clock);
+	tk->ntp_error = 0;
 	timekeeping_suspended = 0;
-	timekeeping_update(&timekeeper, false);
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	timekeeping_update(tk, false);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	touch_softlockup_watchdog();
 
@@ -758,14 +775,15 @@
 
 static int timekeeping_suspend(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
 	struct timespec		delta, delta_delta;
 	static struct timespec	old_delta;
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
-	timekeeping_forward_now(&timekeeper);
+	write_seqlock_irqsave(&tk->lock, flags);
+	timekeeping_forward_now(tk);
 	timekeeping_suspended = 1;
 
 	/*
@@ -774,7 +792,7 @@
 	 * try to compensate so the difference in system time
 	 * and persistent_clock time stays close to constant.
 	 */
-	delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
+	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
 	delta_delta = timespec_sub(delta, old_delta);
 	if (abs(delta_delta.tv_sec)  >= 2) {
 		/*
@@ -787,7 +805,7 @@
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
@@ -898,27 +916,29 @@
 		 * the error. This causes the likely below to be unlikely.
 		 *
 		 * The proper fix is to avoid rounding up by using
-		 * the high precision timekeeper.xtime_nsec instead of
+		 * the high precision tk->xtime_nsec instead of
 		 * xtime.tv_nsec everywhere. Fixing this will take some
 		 * time.
 		 */
 		if (likely(error <= interval))
 			adj = 1;
 		else
-			adj = timekeeping_bigadjust(tk, error, &interval,
-							&offset);
-	} else if (error < -interval) {
-		/* See comment above, this is just switched for the negative */
-		error >>= 2;
-		if (likely(error >= -interval)) {
-			adj = -1;
-			interval = -interval;
-			offset = -offset;
-		} else
-			adj = timekeeping_bigadjust(tk, error, &interval,
-							&offset);
-	} else
-		return;
+			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+	} else {
+		if (error < -interval) {
+			/* See comment above, this is just switched for the negative */
+			error >>= 2;
+			if (likely(error >= -interval)) {
+				adj = -1;
+				interval = -interval;
+				offset = -offset;
+			} else {
+				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+			}
+		} else {
+			goto out_adjust;
+		}
+	}
 
 	if (unlikely(tk->clock->maxadj &&
 		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -981,6 +1001,7 @@
 	tk->xtime_nsec -= offset;
 	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 
+out_adjust:
 	/*
 	 * It may be possible that when we entered this function, xtime_nsec
 	 * was very small.  Further, if we're slightly speeding the clocksource
@@ -1003,7 +1024,6 @@
 
 }
 
-
 /**
  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
  *
@@ -1024,15 +1044,21 @@
 
 		/* Figure out if its a leap sec and apply if needed */
 		leap = second_overflow(tk->xtime_sec);
-		tk->xtime_sec += leap;
-		tk->wall_to_monotonic.tv_sec -= leap;
-		if (leap)
-			clock_was_set_delayed();
+		if (unlikely(leap)) {
+			struct timespec ts;
 
+			tk->xtime_sec += leap;
+
+			ts.tv_sec = leap;
+			ts.tv_nsec = 0;
+			tk_set_wall_to_mono(tk,
+				timespec_sub(tk->wall_to_monotonic, ts));
+
+			clock_was_set_delayed();
+		}
 	}
 }
 
-
 /**
  * logarithmic_accumulation - shifted accumulation of cycles
  *
@@ -1076,7 +1102,6 @@
 	return offset;
 }
 
-
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
@@ -1084,21 +1109,22 @@
 static void update_wall_time(void)
 {
 	struct clocksource *clock;
+	struct timekeeper *tk = &timekeeper;
 	cycle_t offset;
 	int shift = 0, maxshift;
 	unsigned long flags;
 	s64 remainder;
 
-	write_seqlock_irqsave(&timekeeper.lock, flags);
+	write_seqlock_irqsave(&tk->lock, flags);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
 		goto out;
 
-	clock = timekeeper.clock;
+	clock = tk->clock;
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-	offset = timekeeper.cycle_interval;
+	offset = tk->cycle_interval;
 #else
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
@@ -1111,19 +1137,19 @@
 	 * chunk in one go, and then try to consume the next smaller
 	 * doubled multiple.
 	 */
-	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+	shift = ilog2(offset) - ilog2(tk->cycle_interval);
 	shift = max(0, shift);
 	/* Bound shift to one less than what overflows tick_length */
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
-	while (offset >= timekeeper.cycle_interval) {
-		offset = logarithmic_accumulation(&timekeeper, offset, shift);
-		if(offset < timekeeper.cycle_interval<<shift)
+	while (offset >= tk->cycle_interval) {
+		offset = logarithmic_accumulation(tk, offset, shift);
+		if (offset < tk->cycle_interval<<shift)
 			shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
-	timekeeping_adjust(&timekeeper, offset);
+	timekeeping_adjust(tk, offset);
 
 
 	/*
@@ -1135,21 +1161,21 @@
 	* the vsyscall implementations are converted to use xtime_nsec
 	* (shifted nanoseconds), this can be killed.
 	*/
-	remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
-	timekeeper.xtime_nsec -= remainder;
-	timekeeper.xtime_nsec += 1 << timekeeper.shift;
-	timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
+	remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+	tk->xtime_nsec -= remainder;
+	tk->xtime_nsec += 1 << tk->shift;
+	tk->ntp_error += remainder << tk->ntp_error_shift;
 
 	/*
 	 * Finally, make sure that after the rounding
 	 * xtime_nsec isn't larger than NSEC_PER_SEC
 	 */
-	accumulate_nsecs_to_secs(&timekeeper);
+	accumulate_nsecs_to_secs(tk);
 
-	timekeeping_update(&timekeeper, false);
+	timekeeping_update(tk, false);
 
 out:
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+	write_sequnlock_irqrestore(&tk->lock, flags);
 
 }
 
@@ -1166,18 +1192,18 @@
  */
 void getboottime(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec boottime = {
-		.tv_sec = timekeeper.wall_to_monotonic.tv_sec +
-				timekeeper.total_sleep_time.tv_sec,
-		.tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
-				timekeeper.total_sleep_time.tv_nsec
+		.tv_sec = tk->wall_to_monotonic.tv_sec +
+				tk->total_sleep_time.tv_sec,
+		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
+				tk->total_sleep_time.tv_nsec
 	};
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(getboottime);
 
-
 /**
  * get_monotonic_boottime - Returns monotonic time since boot
  * @ts:		pointer to the timespec to be set
@@ -1189,19 +1215,20 @@
  */
 void get_monotonic_boottime(struct timespec *ts)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono, sleep;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		ts->tv_sec = timekeeper.xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
-		tomono = timekeeper.wall_to_monotonic;
-		sleep = timekeeper.total_sleep_time;
+		seq = read_seqbegin(&tk->lock);
+		ts->tv_sec = tk->xtime_sec;
+		ts->tv_nsec = timekeeping_get_ns(tk);
+		tomono = tk->wall_to_monotonic;
+		sleep = tk->total_sleep_time;
 
-	} while (read_seqretry(&timekeeper.lock, seq));
+	} while (read_seqretry(&tk->lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
 			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
@@ -1231,31 +1258,38 @@
  */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
+	struct timekeeper *tk = &timekeeper;
+
+	*ts = timespec_add(*ts, tk->total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return timekeeper.xtime_sec;
+	struct timekeeper *tk = &timekeeper;
+
+	return tk->xtime_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return tk_xtime(&timekeeper);
+	struct timekeeper *tk = &timekeeper;
+
+	return tk_xtime(tk);
 }
 
 struct timespec current_kernel_time(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec now;
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		now = tk_xtime(&timekeeper);
-	} while (read_seqretry(&timekeeper.lock, seq));
+		now = tk_xtime(tk);
+	} while (read_seqretry(&tk->lock, seq));
 
 	return now;
 }
@@ -1263,15 +1297,16 @@
 
 struct timespec get_monotonic_coarse(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	struct timespec now, mono;
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		now = tk_xtime(&timekeeper);
-		mono = timekeeper.wall_to_monotonic;
-	} while (read_seqretry(&timekeeper.lock, seq));
+		now = tk_xtime(tk);
+		mono = tk->wall_to_monotonic;
+	} while (read_seqretry(&tk->lock, seq));
 
 	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
 				now.tv_nsec + mono.tv_nsec);
@@ -1300,14 +1335,15 @@
 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 				struct timespec *wtom, struct timespec *sleep)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		*xtim = tk_xtime(&timekeeper);
-		*wtom = timekeeper.wall_to_monotonic;
-		*sleep = timekeeper.total_sleep_time;
-	} while (read_seqretry(&timekeeper.lock, seq));
+		seq = read_seqbegin(&tk->lock);
+		*xtim = tk_xtime(tk);
+		*wtom = tk->wall_to_monotonic;
+		*sleep = tk->total_sleep_time;
+	} while (read_seqretry(&tk->lock, seq));
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -1321,19 +1357,20 @@
  */
 ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
 {
+	struct timekeeper *tk = &timekeeper;
 	ktime_t now;
 	unsigned int seq;
 	u64 secs, nsecs;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
+		seq = read_seqbegin(&tk->lock);
 
-		secs = timekeeper.xtime_sec;
-		nsecs = timekeeping_get_ns(&timekeeper);
+		secs = tk->xtime_sec;
+		nsecs = timekeeping_get_ns(tk);
 
-		*offs_real = timekeeper.offs_real;
-		*offs_boot = timekeeper.offs_boot;
-	} while (read_seqretry(&timekeeper.lock, seq));
+		*offs_real = tk->offs_real;
+		*offs_boot = tk->offs_boot;
+	} while (read_seqretry(&tk->lock, seq));
 
 	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
 	now = ktime_sub(now, *offs_real);
@@ -1346,19 +1383,19 @@
  */
 ktime_t ktime_get_monotonic_offset(void)
 {
+	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	struct timespec wtom;
 
 	do {
-		seq = read_seqbegin(&timekeeper.lock);
-		wtom = timekeeper.wall_to_monotonic;
-	} while (read_seqretry(&timekeeper.lock, seq));
+		seq = read_seqbegin(&tk->lock);
+		wtom = tk->wall_to_monotonic;
+	} while (read_seqretry(&tk->lock, seq));
 
 	return timespec_to_ktime(wtom);
 }
 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
 
-
 /**
  * xtime_update() - advances the timekeeping infrastructure
  * @ticks:	number of ticks, that have elapsed since the last call.
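The (u64) casts added to tk_set_xtime() and tk_xtime_add() above matter on 32-bit builds: tv_nsec is a 32-bit long there, and shifting a value near NSEC_PER_SEC left by a clocksource shift overflows before any implicit widening happens. A stand-alone illustration (uint32_t models the 32-bit long; the shift value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tv_nsec = 999999999u;	/* models a 32-bit long tv_nsec */
	unsigned int shift = 8;		/* arbitrary clocksource-style shift */

	/* Shift first, widen afterwards: done in 32 bits, so it wraps. */
	uint64_t wrong = (uint64_t)(tv_nsec << shift);

	/* Widen first, then shift: the intended 64-bit value. */
	uint64_t right = (uint64_t)tv_nsec << shift;

	printf("without the cast: %llu\n", (unsigned long long)wrong);
	printf("with the cast:    %llu\n", (unsigned long long)right);
	return 0;
}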
diff --git a/kernel/timer.c b/kernel/timer.c
index a61c093..8c5e7b9 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1407,13 +1407,6 @@
 
 #endif
 
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
- * should be moved into arch/i386 instead?
- */
-
 /**
  * sys_getpid - return the thread group id of the current process
  *
@@ -1469,8 +1462,6 @@
 	return from_kgid_munged(current_user_ns(), current_egid());
 }
 
-#endif
-
 static void process_timeout(unsigned long __data)
 {
 	wake_up_process((struct task_struct *)__data);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752..8a6d2ee 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -281,7 +281,7 @@
 
 	head = this_cpu_ptr(event_function.perf_events);
 	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
-			      1, &regs, head);
+			      1, &regs, head, NULL);
 
 #undef ENTRY_SIZE
 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b31d3d5..1a21170 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1002,7 +1002,8 @@
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx,
+					entry->ip, 1, regs, head, NULL);
 }
 
 /* Kretprobe profile handler */
@@ -1033,7 +1034,8 @@
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx,
+					entry->ret_ip, 1, regs, head, NULL);
 }
 #endif	/* CONFIG_PERF_EVENTS */
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 96fc733..60e4d78 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -532,7 +532,7 @@
 			       (unsigned long *)&rec->args);
 
 	head = this_cpu_ptr(sys_data->enter_event->perf_events);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
 int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -608,7 +608,7 @@
 	rec->ret = syscall_get_return_value(current, regs);
 
 	head = this_cpu_ptr(sys_data->exit_event->perf_events);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
 int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2b36ac6..03003cd 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@
 		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
 	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
 
  out:
 	preempt_enable();
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 69add8a..4b1dfba 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -575,7 +575,7 @@
 /*
  * Create/destroy watchdog threads as CPUs come and go:
  */
-static int
+static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
@@ -610,27 +610,10 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-#ifdef CONFIG_SUSPEND
-/*
- * On exit from suspend we force an offline->online transition on the boot CPU
- * so that the PMU state that was lost while in suspended state gets set up
- * properly for the boot CPU.  This information is required for restarting the
- * NMI watchdog.
- */
-void lockup_detector_bootcpu_resume(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-
-	cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu);
-	cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu);
-	cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu);
-}
-#endif
-
 void __init lockup_detector_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6b4718e..b41823c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -39,12 +39,6 @@
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
 
-static struct task_struct *sync_supers_tsk;
-static struct timer_list sync_supers_timer;
-
-static int bdi_sync_supers(void *);
-static void sync_supers_timer_fn(unsigned long);
-
 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 {
 	if (wb1 < wb2) {
@@ -250,12 +244,6 @@
 {
 	int err;
 
-	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
-	BUG_ON(IS_ERR(sync_supers_tsk));
-
-	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
-	bdi_arm_supers_timer();
-
 	err = bdi_init(&default_backing_dev_info);
 	if (!err)
 		bdi_register(&default_backing_dev_info, NULL, "default");
@@ -270,46 +258,6 @@
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-/*
- * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
- * or we risk deadlocking on ->s_umount. The longer term solution would be
- * to implement sync_supers_bdi() or similar and simply do it from the
- * bdi writeback thread individually.
- */
-static int bdi_sync_supers(void *unused)
-{
-	set_user_nice(current, 0);
-
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
-
-		/*
-		 * Do this periodically, like kupdated() did before.
-		 */
-		sync_supers();
-	}
-
-	return 0;
-}
-
-void bdi_arm_supers_timer(void)
-{
-	unsigned long next;
-
-	if (!dirty_writeback_interval)
-		return;
-
-	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
-	mod_timer(&sync_supers_timer, round_jiffies_up(next));
-}
-
-static void sync_supers_timer_fn(unsigned long unused)
-{
-	wake_up_process(sync_supers_tsk);
-	bdi_arm_supers_timer();
-}
-
 static void wakeup_timer_fn(unsigned long data)
 {
 	struct backing_dev_info *bdi = (struct backing_dev_info *)data;
diff --git a/mm/compaction.c b/mm/compaction.c
index e78cb96..7fcd3a5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -51,6 +51,47 @@
 }
 
 /*
+ * Compaction requires the taking of some coarse locks that are potentially
+ * very heavily contended. Check if the process needs to be scheduled or
+ * if the lock is contended. For async compaction, back out in the event
+ * if contention is severe. For sync compaction, schedule.
+ *
+ * Returns true if the lock is held.
+ * Returns false if the lock is released and compaction should abort
+ */
+static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+				      bool locked, struct compact_control *cc)
+{
+	if (need_resched() || spin_is_contended(lock)) {
+		if (locked) {
+			spin_unlock_irqrestore(lock, *flags);
+			locked = false;
+		}
+
+		/* async aborts if taking too long or contended */
+		if (!cc->sync) {
+			if (cc->contended)
+				*cc->contended = true;
+			return false;
+		}
+
+		cond_resched();
+		if (fatal_signal_pending(current))
+			return false;
+	}
+
+	if (!locked)
+		spin_lock_irqsave(lock, *flags);
+	return true;
+}
+
+static inline bool compact_trylock_irqsave(spinlock_t *lock,
+			unsigned long *flags, struct compact_control *cc)
+{
+	return compact_checklock_irqsave(lock, flags, false, cc);
+}
+
+/*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
  * pages inside of the pageblock (even though it may still end up isolating
@@ -173,7 +214,7 @@
 }
 
 /* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
+static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
 {
 	struct page *page;
 	unsigned int count[2] = { 0, };
@@ -181,8 +222,14 @@
 	list_for_each_entry(page, &cc->migratepages, lru)
 		count[!!page_is_file_cache(page)]++;
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	/* If locked we can use the interrupt unsafe versions */
+	if (locked) {
+		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	} else {
+		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	}
 }
 
 /* Similar to reclaim, but different enough that they don't share logic */
@@ -228,6 +275,8 @@
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
 	struct lruvec *lruvec;
+	unsigned long flags;
+	bool locked;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -247,25 +296,22 @@
 
 	/* Time to isolate some pages for migration */
 	cond_resched();
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	locked = true;
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
-		bool locked = true;
 
 		/* give a chance to irqs before checking need_resched() */
 		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			locked = false;
 		}
-		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
-			if (locked)
-				spin_unlock_irq(&zone->lru_lock);
-			cond_resched();
-			spin_lock_irq(&zone->lru_lock);
-			if (fatal_signal_pending(current))
-				break;
-		} else if (!locked)
-			spin_lock_irq(&zone->lru_lock);
+
+		/* Check if it is ok to still hold the lock */
+		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+								locked, cc);
+		if (!locked)
+			break;
 
 		/*
 		 * migrate_pfn does not necessarily start aligned to a
@@ -349,9 +395,10 @@
 		}
 	}
 
-	acct_isolated(zone, cc);
+	acct_isolated(zone, locked, cc);
 
-	spin_unlock_irq(&zone->lru_lock);
+	if (locked)
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -384,6 +431,20 @@
 }
 
 /*
+ * Returns the start pfn of the last page block in a zone.  This is the starting
+ * point for full compaction of a zone.  Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
+/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -422,17 +483,6 @@
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
-		/*
-		 * Skip ahead if another thread is compacting in the area
-		 * simultaneously. If we wrapped around, we can only skip
-		 * ahead if zone->compact_cached_free_pfn also wrapped to
-		 * above our starting point.
-		 */
-		if (cc->order > 0 && (!cc->wrapped ||
-				      zone->compact_cached_free_pfn >
-				      cc->start_free_pfn))
-			pfn = min(pfn, zone->compact_cached_free_pfn);
-
 		if (!pfn_valid(pfn))
 			continue;
 
@@ -458,7 +508,16 @@
 		 * are disabled
 		 */
 		isolated = 0;
-		spin_lock_irqsave(&zone->lock, flags);
+
+		/*
+		 * The zone lock must be held to isolate freepages.
+		 * Unfortunately this is a very coarse lock and can be
+		 * heavily contended if there are parallel allocations
+		 * or parallel compactions. For async compaction do not
+		 * spin on the lock.
+		 */
+		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
+			break;
 		if (suitable_migration_target(page)) {
 			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
 			isolated = isolate_freepages_block(pfn, end_pfn,
@@ -474,7 +533,15 @@
 		 */
 		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
-			if (cc->order > 0)
+
+			/*
+			 * If the free scanner has wrapped, update
+			 * compact_cached_free_pfn to point to the highest
+			 * pageblock with free pages. This reduces excessive
+			 * scanning of full pageblocks near the end of the
+			 * zone
+			 */
+			if (cc->order > 0 && cc->wrapped)
 				zone->compact_cached_free_pfn = high_pfn;
 		}
 	}
@@ -484,6 +551,11 @@
 
 	cc->free_pfn = high_pfn;
 	cc->nr_freepages = nr_freepages;
+
+	/* If compact_cached_free_pfn is reset then set it now */
+	if (cc->order > 0 && !cc->wrapped &&
+			zone->compact_cached_free_pfn == start_free_pfn(zone))
+		zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -570,20 +642,6 @@
 	return ISOLATE_SUCCESS;
 }
 
-/*
- * Returns the start pfn of the last page block in a zone.  This is the starting
- * point for full compaction of a zone.  Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-	unsigned long free_pfn;
-	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	free_pfn &= ~(pageblock_nr_pages-1);
-	return free_pfn;
-}
-
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -771,7 +829,7 @@
 
 static unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync)
+				 bool sync, bool *contended)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -780,6 +838,7 @@
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
 		.sync = sync,
+		.contended = contended,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -801,7 +860,7 @@
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync)
+			bool sync, bool *contended)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -825,7 +884,8 @@
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync);
+		status = compact_zone_order(zone, order, gfp_mask, sync,
+						contended);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
@@ -861,7 +921,7 @@
 		if (cc->order > 0) {
 			int ok = zone_watermark_ok(zone, cc->order,
 						low_wmark_pages(zone), 0, 0);
-			if (ok && cc->order > zone->compact_order_failed)
+			if (ok && cc->order >= zone->compact_order_failed)
 				zone->compact_order_failed = cc->order + 1;
 			/* Currently async compaction is never deferred. */
 			else if (!ok && cc->sync)
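The compact_checklock_irqsave() helper added above feeds lock contention back to the page allocator through the new compact_control::contended pointer: an async compaction that finds zone->lru_lock or zone->lock contended drops out instead of spinning, and __alloc_pages_slowpath() then treats the attempt like a deferred compaction for __GFP_NO_KSWAPD allocations. The fragment below is only a stand-alone userspace sketch of the same "try the lock, report contention through an out-parameter" pattern; all names and the pthread locking are illustrative and not part of this patch.

	/* Userspace sketch of the contention-reporting pattern used by
	 * compact_checklock_irqsave(). All names here are made up.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct work_ctx {
		bool sync;		/* sync work may wait, async must not */
		bool *contended;	/* reported to the caller, may be NULL */
	};

	/* Returns true with the lock held, false if the caller should abort. */
	static bool checklock(pthread_mutex_t *lock, struct work_ctx *ctx)
	{
		if (pthread_mutex_trylock(lock) == 0)
			return true;			/* uncontended fast path */

		if (!ctx->sync) {
			if (ctx->contended)
				*ctx->contended = true;	/* tell the caller why we quit */
			return false;
		}

		pthread_mutex_lock(lock);		/* sync mode: just wait */
		return true;
	}

	int main(void)
	{
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
		bool contended = false;
		struct work_ctx ctx = { .sync = false, .contended = &contended };

		if (checklock(&lock, &ctx)) {
			/* ... critical section ... */
			pthread_mutex_unlock(&lock);
		}
		printf("contended=%d\n", contended);
		return 0;
	}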
diff --git a/mm/internal.h b/mm/internal.h
index 3314f79..b8c91b3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -130,6 +130,7 @@
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
+	bool *contended;		/* True if a lock was contended */
 };
 
 unsigned long
diff --git a/mm/mmap.c b/mm/mmap.c
index e3e8691..9adee9f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2309,7 +2309,7 @@
 	}
 	vm_unacct_memory(nr_accounted);
 
-	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e5363f3..5ad5ce2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1532,7 +1532,6 @@
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
-	bdi_arm_supers_timer();
 	return 0;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 009ac28..c66fb87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1928,6 +1928,17 @@
 		zlc_active = 0;
 		goto zonelist_scan;
 	}
+
+	if (page)
+		/*
+		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+		 * necessary to allocate the page. The expectation is
+		 * that the caller is taking steps that will free more
+		 * memory. The caller should avoid the page being used
+		 * for !PFMEMALLOC purposes.
+		 */
+		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
 	return page;
 }
 
@@ -2091,7 +2102,7 @@
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	struct page *page;
@@ -2106,7 +2117,8 @@
 
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, sync_migration);
+						nodemask, sync_migration,
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -2152,7 +2164,7 @@
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	return NULL;
@@ -2325,6 +2337,7 @@
 	unsigned long did_some_progress;
 	bool sync_migration = false;
 	bool deferred_compaction = false;
+	bool contended_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2389,14 +2402,6 @@
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
 		if (page) {
-			/*
-			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-			 * necessary to allocate the page. The expectation is
-			 * that the caller is taking steps that will free more
-			 * memory. The caller should avoid the page being used
-			 * for !PFMEMALLOC purposes.
-			 */
-			page->pfmemalloc = true;
 			goto got_pg;
 		}
 	}
@@ -2422,6 +2427,7 @@
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2431,10 +2437,11 @@
 	/*
 	 * If compaction is deferred for high-order allocations, it is because
 	 * sync compaction recently failed. If this is the case and the caller
-	 * has requested the system not be heavily disrupted, fail the
-	 * allocation now instead of entering direct reclaim
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
 	 */
-	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
@@ -2505,6 +2512,7 @@
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 		if (page)
@@ -2569,8 +2577,6 @@
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	else
-		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8ca533c..b258da8 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -368,3 +368,9 @@
 		vlan_vid_del(dev, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+	return rtnl_dereference(dev->vlan_info) ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
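vlan_uses_dev() exported above simply reports whether any VLAN devices sit on top of a real device; since it uses rtnl_dereference(), callers are expected to hold RTNL. A minimal, purely hypothetical caller could look like the sketch below (the function name and the -EBUSY policy are assumptions for illustration only, not part of this patch).

	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* Hypothetical example: refuse to reconfigure a device while VLANs
	 * are stacked on it. Runs under rtnl_lock(), as vlan_uses_dev()
	 * expects.
	 */
	static int example_reconfigure(struct net_device *dev)
	{
		if (vlan_uses_dev(dev))
			return -EBUSY;	/* illustrative policy only */

		/* ... safe to touch the lower device here ... */
		return 0;
	}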
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 73a2a83..4024424 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -137,9 +137,21 @@
 	return rc;
 }
 
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (vlan->netpoll)
+		netpoll_send_skb(vlan->netpoll, skb);
+#else
+	BUG();
+#endif
+	return NETDEV_TX_OK;
+}
+
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 	unsigned int len;
 	int ret;
@@ -150,29 +162,30 @@
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
 	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-	    vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
-		vlan_tci = vlan_dev_priv(dev)->vlan_id;
+		vlan_tci = vlan->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}
 
-	skb->dev = vlan_dev_priv(dev)->real_dev;
+	skb->dev = vlan->real_dev;
 	len = skb->len;
-	if (netpoll_tx_running(dev))
-		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+	if (unlikely(netpoll_tx_running(dev)))
+		return vlan_netpoll_send_skb(vlan, skb);
+
 	ret = dev_queue_xmit(skb);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 		struct vlan_pcpu_stats *stats;
 
-		stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
+		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
 		stats->tx_bytes += len;
 		u64_stats_update_end(&stats->syncp);
 	} else {
-		this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
+		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
 	}
 
 	return ret;
@@ -669,25 +682,26 @@
 	return;
 }
 
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
+				  gfp_t gfp)
 {
-	struct vlan_dev_priv *info = vlan_dev_priv(dev);
-	struct net_device *real_dev = info->real_dev;
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
 	struct netpoll *netpoll;
 	int err = 0;
 
-	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	netpoll = kzalloc(sizeof(*netpoll), gfp);
 	err = -ENOMEM;
 	if (!netpoll)
 		goto out;
 
-	err = __netpoll_setup(netpoll, real_dev);
+	err = __netpoll_setup(netpoll, real_dev, gfp);
 	if (err) {
 		kfree(netpoll);
 		goto out;
 	}
 
-	info->netpoll = netpoll;
+	vlan->netpoll = netpoll;
 
 out:
 	return err;
@@ -695,19 +709,15 @@
 
 static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 {
-	struct vlan_dev_priv *info = vlan_dev_priv(dev);
-	struct netpoll *netpoll = info->netpoll;
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct netpoll *netpoll = vlan->netpoll;
 
 	if (!netpoll)
 		return;
 
-	info->netpoll = NULL;
+	vlan->netpoll = NULL;
 
-        /* Wait for transmitting packets to finish before freeing. */
-        synchronize_rcu_bh();
-
-        __netpoll_cleanup(netpoll);
-        kfree(netpoll);
+	__netpoll_free_rcu(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
diff --git a/net/atm/common.c b/net/atm/common.c
index b4b44db..0c0ad93 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -812,6 +812,7 @@
 
 		if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
 			return -ENOTCONN;
+		memset(&pvc, 0, sizeof(pvc));
 		pvc.sap_family = AF_ATMPVC;
 		pvc.sap_addr.itf = vcc->dev->number;
 		pvc.sap_addr.vpi = vcc->vpi;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 3a73491..ae03240 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -95,6 +95,7 @@
 		return -ENOTCONN;
 	*sockaddr_len = sizeof(struct sockaddr_atmpvc);
 	addr = (struct sockaddr_atmpvc *)sockaddr;
+	memset(addr, 0, sizeof(*addr));
 	addr->sap_family = AF_ATMPVC;
 	addr->sap_addr.itf = vcc->dev->number;
 	addr->sap_addr.vpi = vcc->vpi;
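The two memset() additions above in net/atm/common.c and net/atm/pvc.c fix the same class of bug: a struct sockaddr_atmpvc is assembled on the kernel stack and copied to userspace, so any padding or unwritten bytes leak stale stack contents unless the structure is zeroed first. Below is a stand-alone illustration of the "zero before fill" rule with an invented structure; it is an example of the pattern, not kernel code.

	/* Illustration of the "zero before fill" rule applied by the two
	 * fixes above. Structure and field names are invented.
	 */
	#include <stdio.h>
	#include <string.h>

	struct example_addr {
		unsigned short family;
		unsigned char  reserved[6];	/* bytes the code never writes */
		int            itf, vpi, vci;
	};

	static void fill_addr(struct example_addr *addr, int itf, int vpi, int vci)
	{
		memset(addr, 0, sizeof(*addr));	/* no stale stack bytes escape */
		addr->family = 42;
		addr->itf = itf;
		addr->vpi = vpi;
		addr->vci = vci;
	}

	int main(void)
	{
		struct example_addr a;

		fill_addr(&a, 1, 0, 100);
		printf("%d.%d.%d\n", a.itf, a.vpi, a.vci);
		return 0;
	}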
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e877af8..df79300 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -166,13 +166,15 @@
 	int16_t buff_pos;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct sk_buff *skb;
+	uint8_t *packet_pos;
 
 	if (hard_iface->if_status != BATADV_IF_ACTIVE)
 		return;
 
 	packet_num = 0;
 	buff_pos = 0;
-	batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+	packet_pos = forw_packet->skb->data;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 
 	/* adjust all flags and log packets */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet
 		 */
-		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-		    (forw_packet->if_incoming == hard_iface))
+		if (forw_packet->direct_link_flags & BIT(packet_num) &&
+		    forw_packet->if_incoming == hard_iface)
 			batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
 		else
 			batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
-		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
-							    "Sending own" :
-							    "Forwarding"));
+		if (packet_num > 0 || !forw_packet->own)
+			fwd_str = "Forwarding";
+		else
+			fwd_str = "Sending own";
+
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
 			   fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@
 		buff_pos += BATADV_OGM_HLEN;
 		buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 		packet_num++;
-		batadv_ogm_packet = (struct batadv_ogm_packet *)
-					(forw_packet->skb->data + buff_pos);
+		packet_pos = forw_packet->skb->data + buff_pos;
+		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 	}
 
 	/* create clone because function is called more than once */
@@ -227,9 +231,10 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	unsigned char directlink;
+	uint8_t *packet_pos;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)
-						(forw_packet->skb->data);
+	packet_pos = forw_packet->skb->data;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 	directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
 
 	if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@
 				    int packet_len, bool direct_link)
 {
 	unsigned char *skb_buff;
+	unsigned long new_direct_link_flag;
 
 	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
 	memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@
 	forw_packet_aggr->num_packets++;
 
 	/* save packet direct link flag status */
-	if (direct_link)
-		forw_packet_aggr->direct_link_flags |=
-			(1 << forw_packet_aggr->num_packets);
+	if (direct_link) {
+		new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+		forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+	}
 }
 
 static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if;
 	int vis_server, tt_num_changes = 0;
+	uint32_t seqno;
+	uint8_t bandwidth;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@
 	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
 
 	/* change sequence number to network order */
-	batadv_ogm_packet->seqno =
-			htonl((uint32_t)atomic_read(&hard_iface->seqno));
+	seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+	batadv_ogm_packet->seqno = htonl(seqno);
 	atomic_inc(&hard_iface->seqno);
 
-	batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
-	batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
+	batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+	batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
 	if (tt_num_changes >= 0)
 		batadv_ogm_packet->tt_num_changes = tt_num_changes;
 
@@ -613,12 +622,13 @@
 	else
 		batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
 
-	if ((hard_iface == primary_if) &&
-	    (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
-		batadv_ogm_packet->gw_flags =
-				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-	else
+	if (hard_iface == primary_if &&
+	    atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
+		bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+		batadv_ogm_packet->gw_flags = bandwidth;
+	} else {
 		batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
+	}
 
 	batadv_slide_own_bcast_window(hard_iface);
 	batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -642,8 +652,9 @@
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
 	struct hlist_node *node;
-	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+	uint8_t sum_orig, sum_neigh;
 	uint8_t *neigh_addr;
+	uint8_t tq_avg;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -667,8 +678,8 @@
 		spin_lock_bh(&tmp_neigh_node->lq_update_lock);
 		batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
 				       &tmp_neigh_node->tq_index, 0);
-		tmp_neigh_node->tq_avg =
-			batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+		tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+		tmp_neigh_node->tq_avg = tq_avg;
 		spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
 	}
 
@@ -727,17 +738,15 @@
 	if (router && (neigh_node->tq_avg == router->tq_avg)) {
 		orig_node_tmp = router->orig_node;
 		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-		bcast_own_sum_orig =
-			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+		sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
 		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
 		orig_node_tmp = neigh_node->orig_node;
 		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-		bcast_own_sum_neigh =
-			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+		sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
 		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
-		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+		if (sum_orig >= sum_neigh)
 			goto update_tt;
 	}
 
@@ -835,8 +844,10 @@
 	spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
 	/* pay attention to not get a value bigger than 100 % */
-	total_count = (orig_eq_count > neigh_rq_count ?
-		       neigh_rq_count : orig_eq_count);
+	if (orig_eq_count > neigh_rq_count)
+		total_count = neigh_rq_count;
+	else
+		total_count = orig_eq_count;
 
 	/* if we have too few packets (too little data) we set tq_own to zero
 	 * if we receive too few packets it is not considered bidirectional
@@ -910,6 +921,7 @@
 	int set_mark, ret = -1;
 	uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
 	uint8_t *neigh_addr;
+	uint8_t packet_count;
 
 	orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
 	if (!orig_node)
@@ -944,9 +956,9 @@
 						     tmp_neigh_node->real_bits,
 						     seq_diff, set_mark);
 
-		tmp_neigh_node->real_packet_count =
-			bitmap_weight(tmp_neigh_node->real_bits,
-				      BATADV_TQ_LOCAL_WINDOW_SIZE);
+		packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+					     BATADV_TQ_LOCAL_WINDOW_SIZE);
+		tmp_neigh_node->real_packet_count = packet_count;
 	}
 	rcu_read_unlock();
 
@@ -1163,9 +1175,12 @@
 	/* if sender is a direct neighbor the sender mac equals
 	 * originator mac
 	 */
-	orig_neigh_node = (is_single_hop_neigh ?
-			   orig_node :
-			   batadv_get_orig_node(bat_priv, ethhdr->h_source));
+	if (is_single_hop_neigh)
+		orig_neigh_node = orig_node;
+	else
+		orig_neigh_node = batadv_get_orig_node(bat_priv,
+						       ethhdr->h_source);
+
 	if (!orig_neigh_node)
 		goto out;
 
@@ -1251,6 +1266,7 @@
 	int buff_pos = 0, packet_len;
 	unsigned char *tt_buff, *packet_buff;
 	bool ret;
+	uint8_t *packet_pos;
 
 	ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
 	if (!ret)
@@ -1281,8 +1297,8 @@
 		buff_pos += BATADV_OGM_HLEN;
 		buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 
-		batadv_ogm_packet = (struct batadv_ogm_packet *)
-						(packet_buff + buff_pos);
+		packet_pos = packet_buff + buff_pos;
+		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 	} while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
 					   batadv_ogm_packet->tt_num_changes));
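Most of the bat_iv_ogm.c churn above replaces casts of the form (struct batadv_ogm_packet *)(buff + offset) with a uint8_t *packet_pos intermediate while walking a buffer of aggregated OGMs, and switches open-coded 1 << n flags to BIT(n). The snippet below is a stand-alone sketch of the same offset-and-cast buffer walk on an invented record format (one length byte followed by payload); it is unrelated to the real OGM layout.

	/* Sketch of walking variable-length records by advancing an offset
	 * and casting at a byte pointer, as the OGM aggregation loop does.
	 * The record format here is invented for the example.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct record {
		uint8_t len;	/* payload length, excluding this header */
		uint8_t data[];
	};

	static void walk(const uint8_t *buff, size_t buff_len)
	{
		size_t pos = 0;

		while (pos + sizeof(struct record) <= buff_len) {
			const uint8_t *record_pos = buff + pos;
			const struct record *rec = (const struct record *)record_pos;

			if (pos + sizeof(*rec) + rec->len > buff_len)
				break;				/* truncated record */

			printf("record at %zu, %u payload bytes\n", pos, rec->len);
			pos += sizeof(*rec) + rec->len;		/* next record */
		}
	}

	int main(void)
	{
		const uint8_t buff[] = { 2, 'h', 'i', 3, 'f', 'o', 'o' };

		walk(buff, sizeof(buff));
		return 0;
	}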
 
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6705d35..0a9084a 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -133,7 +133,7 @@
 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
 						   struct batadv_claim *data)
 {
-	struct batadv_hashtable *hash = bat_priv->claim_hash;
+	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct batadv_claim *claim;
@@ -174,7 +174,7 @@
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 			  uint8_t *addr, short vid)
 {
-	struct batadv_hashtable *hash = bat_priv->backbone_hash;
+	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@
 	int i;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 
-	hash = backbone_gw->bat_priv->claim_hash;
+	hash = backbone_gw->bat_priv->bla.claim_hash;
 	if (!hash)
 		return;
 
@@ -265,7 +265,7 @@
 	if (!primary_if)
 		return;
 
-	memcpy(&local_claim_dest, &bat_priv->claim_dest,
+	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
 	       sizeof(local_claim_dest));
 	local_claim_dest.type = claimtype;
 
@@ -281,7 +281,7 @@
 			 NULL,
 			 /* Ethernet SRC/HW SRC:  originator mac */
 			 primary_if->net_dev->dev_addr,
-			 /* HW DST: FF:43:05:XX:00:00
+			 /* HW DST: FF:43:05:XX:YY:YY
 			  * with XX   = claim type
 			  * and YY:YY = group id
 			  */
@@ -295,7 +295,7 @@
 
 	/* now we pretend that the client would have sent this ... */
 	switch (claimtype) {
-	case BATADV_CLAIM_TYPE_ADD:
+	case BATADV_CLAIM_TYPE_CLAIM:
 		/* normal claim frame
 		 * set Ethernet SRC to the clients mac
 		 */
@@ -303,7 +303,7 @@
 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
 		break;
-	case BATADV_CLAIM_TYPE_DEL:
+	case BATADV_CLAIM_TYPE_UNCLAIM:
 		/* unclaim frame
 		 * set HW SRC to the clients mac
 		 */
@@ -323,7 +323,8 @@
 		break;
 	case BATADV_CLAIM_TYPE_REQUEST:
 		/* request frame
-		 * set HW SRC to the special mac containg the crc
+		 * set HW SRC and header destination to the receiving backbone
+		 * gw's mac
 		 */
 		memcpy(hw_src, mac, ETH_ALEN);
 		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@
 
 	skb_reset_mac_header(skb);
 	skb->protocol = eth_type_trans(skb, soft_iface);
-	bat_priv->stats.rx_packets++;
-	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+			   skb->len + ETH_HLEN);
 	soft_iface->last_rx = jiffies;
 
 	netif_rx(skb);
@@ -389,7 +391,7 @@
 	/* one for the hash, one for returning */
 	atomic_set(&entry->refcount, 2);
 
-	hash_added = batadv_hash_add(bat_priv->backbone_hash,
+	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
 				     batadv_compare_backbone_gw,
 				     batadv_choose_backbone_gw, entry,
 				     &entry->hash_entry);
@@ -456,7 +458,7 @@
 	if (!backbone_gw)
 		return;
 
-	hash = bat_priv->claim_hash;
+	hash = bat_priv->bla.claim_hash;
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
@@ -467,7 +469,7 @@
 				continue;
 
 			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
-					      BATADV_CLAIM_TYPE_ADD);
+					      BATADV_CLAIM_TYPE_CLAIM);
 		}
 		rcu_read_unlock();
 	}
@@ -497,7 +499,7 @@
 
 	/* no local broadcasts should be sent or received, for now. */
 	if (!atomic_read(&backbone_gw->request_sent)) {
-		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
 		atomic_set(&backbone_gw->request_sent, 1);
 	}
 }
@@ -557,7 +559,7 @@
 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
 			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
 			   mac, vid);
-		hash_added = batadv_hash_add(bat_priv->claim_hash,
+		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
 					     batadv_compare_claim,
 					     batadv_choose_claim, claim,
 					     &claim->hash_entry);
@@ -577,8 +579,7 @@
 			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
 			   mac, vid);
 
-		claim->backbone_gw->crc ^=
-			crc16(0, claim->addr, ETH_ALEN);
+		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
 		batadv_backbone_gw_free_ref(claim->backbone_gw);
 
 	}
@@ -610,7 +611,7 @@
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
 		   mac, vid);
 
-	batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
 			   batadv_choose_claim, claim);
 	batadv_claim_free_ref(claim); /* reference from the hash is gone */
 
@@ -657,7 +658,7 @@
 		 * we can allow traffic again.
 		 */
 		if (atomic_read(&backbone_gw->request_sent)) {
-			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
 			atomic_set(&backbone_gw->request_sent, 0);
 		}
 	}
@@ -702,7 +703,7 @@
 	if (primary_if && batadv_compare_eth(backbone_addr,
 					     primary_if->net_dev->dev_addr))
 		batadv_bla_send_claim(bat_priv, claim_addr, vid,
-				      BATADV_CLAIM_TYPE_DEL);
+				      BATADV_CLAIM_TYPE_UNCLAIM);
 
 	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
@@ -738,7 +739,7 @@
 	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
 	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
 		batadv_bla_send_claim(bat_priv, claim_addr, vid,
-				      BATADV_CLAIM_TYPE_ADD);
+				      BATADV_CLAIM_TYPE_CLAIM);
 
 	/* TODO: we could call something like tt_local_del() here. */
 
@@ -772,7 +773,7 @@
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
 	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
-	bla_dst_own = &bat_priv->claim_dest;
+	bla_dst_own = &bat_priv->bla.claim_dest;
 
 	/* check if it is a claim packet in general */
 	if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@
 	 * otherwise assume it is in the hw_src
 	 */
 	switch (bla_dst->type) {
-	case BATADV_CLAIM_TYPE_ADD:
+	case BATADV_CLAIM_TYPE_CLAIM:
 		backbone_addr = hw_src;
 		break;
 	case BATADV_CLAIM_TYPE_REQUEST:
 	case BATADV_CLAIM_TYPE_ANNOUNCE:
-	case BATADV_CLAIM_TYPE_DEL:
+	case BATADV_CLAIM_TYPE_UNCLAIM:
 		backbone_addr = ethhdr->h_source;
 		break;
 	default:
@@ -904,12 +905,12 @@
 
 	/* check for the different types of claim frames ... */
 	switch (bla_dst->type) {
-	case BATADV_CLAIM_TYPE_ADD:
+	case BATADV_CLAIM_TYPE_CLAIM:
 		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
 					ethhdr->h_source, vid))
 			return 1;
 		break;
-	case BATADV_CLAIM_TYPE_DEL:
+	case BATADV_CLAIM_TYPE_UNCLAIM:
 		if (batadv_handle_unclaim(bat_priv, primary_if,
 					  ethhdr->h_source, hw_src, vid))
 			return 1;
@@ -945,7 +946,7 @@
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 	int i;
 
-	hash = bat_priv->backbone_hash;
+	hash = bat_priv->bla.backbone_hash;
 	if (!hash)
 		return;
 
@@ -969,7 +970,7 @@
 purge_now:
 			/* don't wait for the pending request anymore */
 			if (atomic_read(&backbone_gw->request_sent))
-				atomic_dec(&bat_priv->bla_num_requests);
+				atomic_dec(&bat_priv->bla.num_requests);
 
 			batadv_bla_del_backbone_claims(backbone_gw);
 
@@ -999,7 +1000,7 @@
 	struct batadv_hashtable *hash;
 	int i;
 
-	hash = bat_priv->claim_hash;
+	hash = bat_priv->bla.claim_hash;
 	if (!hash)
 		return;
 
@@ -1046,11 +1047,12 @@
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
+	__be16 group;
 	int i;
 
 	/* reset bridge loop avoidance group id */
-	bat_priv->claim_dest.group =
-		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+	bat_priv->bla.claim_dest.group = group;
 
 	if (!oldif) {
 		batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@
 		return;
 	}
 
-	hash = bat_priv->backbone_hash;
+	hash = bat_priv->bla.backbone_hash;
 	if (!hash)
 		return;
 
@@ -1088,8 +1090,8 @@
 /* (re)start the timer */
 static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
 			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }
 
@@ -1099,9 +1101,9 @@
  */
 static void batadv_bla_periodic_work(struct work_struct *work)
 {
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *delayed_work;
 	struct batadv_priv *bat_priv;
+	struct batadv_priv_bla *priv_bla;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@
 	struct batadv_hard_iface *primary_if;
 	int i;
 
-	bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+	delayed_work = container_of(work, struct delayed_work, work);
+	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
@@ -1120,7 +1124,7 @@
 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 		goto out;
 
-	hash = bat_priv->backbone_hash;
+	hash = bat_priv->bla.backbone_hash;
 	if (!hash)
 		goto out;
 
@@ -1160,40 +1164,41 @@
 	int i;
 	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
 	struct batadv_hard_iface *primary_if;
+	uint16_t crc;
+	unsigned long entrytime;
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
 	/* setting claim destination address */
-	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
-	bat_priv->claim_dest.type = 0;
+	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+	bat_priv->bla.claim_dest.type = 0;
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (primary_if) {
-		bat_priv->claim_dest.group =
-			htons(crc16(0, primary_if->net_dev->dev_addr,
-				    ETH_ALEN));
+		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+		bat_priv->bla.claim_dest.group = htons(crc);
 		batadv_hardif_free_ref(primary_if);
 	} else {
-		bat_priv->claim_dest.group = 0; /* will be set later */
+		bat_priv->bla.claim_dest.group = 0; /* will be set later */
 	}
 
 	/* initialize the duplicate list */
+	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
-		bat_priv->bcast_duplist[i].entrytime =
-			jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
-	bat_priv->bcast_duplist_curr = 0;
+		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+	bat_priv->bla.bcast_duplist_curr = 0;
 
-	if (bat_priv->claim_hash)
+	if (bat_priv->bla.claim_hash)
 		return 0;
 
-	bat_priv->claim_hash = batadv_hash_new(128);
-	bat_priv->backbone_hash = batadv_hash_new(32);
+	bat_priv->bla.claim_hash = batadv_hash_new(128);
+	bat_priv->bla.backbone_hash = batadv_hash_new(32);
 
-	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
 		return -ENOMEM;
 
-	batadv_hash_set_lock_class(bat_priv->claim_hash,
+	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
 				   &batadv_claim_hash_lock_class_key);
-	batadv_hash_set_lock_class(bat_priv->backbone_hash,
+	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
 				   &batadv_backbone_hash_lock_class_key);
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@
 	crc = crc16(0, content, length);
 
 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
-		curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
-		entry = &bat_priv->bcast_duplist[curr];
+		curr = (bat_priv->bla.bcast_duplist_curr + i);
+		curr %= BATADV_DUPLIST_SIZE;
+		entry = &bat_priv->bla.bcast_duplist[curr];
 
 		/* we can stop searching if the entry is too old ;
 		 * later entries will be even older
@@ -1256,13 +1262,13 @@
 		return 1;
 	}
 	/* not found, add a new entry (overwrite the oldest entry) */
-	curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
 	curr %= BATADV_DUPLIST_SIZE;
-	entry = &bat_priv->bcast_duplist[curr];
+	entry = &bat_priv->bla.bcast_duplist[curr];
 	entry->crc = crc;
 	entry->entrytime = jiffies;
 	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
-	bat_priv->bcast_duplist_curr = curr;
+	bat_priv->bla.bcast_duplist_curr = curr;
 
 	/* allow it, it's the first occurrence. */
 	return 0;
@@ -1279,7 +1285,7 @@
  */
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
-	struct batadv_hashtable *hash = bat_priv->backbone_hash;
+	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@
 		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
 			return 0;
 
-		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
-					      hdr_size);
+		vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
 		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
 	}
 
@@ -1359,18 +1364,18 @@
 {
 	struct batadv_hard_iface *primary_if;
 
-	cancel_delayed_work_sync(&bat_priv->bla_work);
+	cancel_delayed_work_sync(&bat_priv->bla.work);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 
-	if (bat_priv->claim_hash) {
+	if (bat_priv->bla.claim_hash) {
 		batadv_bla_purge_claims(bat_priv, primary_if, 1);
-		batadv_hash_destroy(bat_priv->claim_hash);
-		bat_priv->claim_hash = NULL;
+		batadv_hash_destroy(bat_priv->bla.claim_hash);
+		bat_priv->bla.claim_hash = NULL;
 	}
-	if (bat_priv->backbone_hash) {
+	if (bat_priv->bla.backbone_hash) {
 		batadv_bla_purge_backbone_gw(bat_priv, 1);
-		batadv_hash_destroy(bat_priv->backbone_hash);
-		bat_priv->backbone_hash = NULL;
+		batadv_hash_destroy(bat_priv->bla.backbone_hash);
+		bat_priv->bla.backbone_hash = NULL;
 	}
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@
 		goto allow;
 
 
-	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
 		/* don't allow broadcasts while requests are in flight */
 		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
@@ -1508,7 +1513,7 @@
 
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
 		/* don't allow broadcasts while requests are in flight */
 		if (is_multicast_ether_addr(ethhdr->h_dest))
 			goto handled;
@@ -1564,7 +1569,7 @@
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->claim_hash;
+	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct batadv_claim *claim;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
@@ -1593,7 +1598,7 @@
 	seq_printf(seq,
 		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
 		   net_dev->name, primary_addr,
-		   ntohs(bat_priv->claim_dest.group));
+		   ntohs(bat_priv->bla.claim_dest.group));
 	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
 		   "Client", "VID", "Originator", "CRC");
 	for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@
 		batadv_hardif_free_ref(primary_if);
 	return ret;
 }
+
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_hard_iface *primary_if;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int secs, msecs;
+	uint32_t i;
+	bool is_own;
+	int ret = 0;
+	uint8_t *primary_addr;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	if (primary_if->if_status != BATADV_IF_ACTIVE) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - primary interface not active\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	primary_addr = primary_if->net_dev->dev_addr;
+	seq_printf(seq,
+		   "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+		   net_dev->name, primary_addr,
+		   ntohs(bat_priv->bla.claim_dest.group));
+	seq_printf(seq, "   %-17s    %-5s %-9s (%-4s)\n",
+		   "Originator", "VID", "last seen", "CRC");
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			msecs = jiffies_to_msecs(jiffies -
+						 backbone_gw->lasttime);
+			secs = msecs / 1000;
+			msecs = msecs % 1000;
+
+			is_own = batadv_compare_eth(backbone_gw->orig,
+						    primary_addr);
+			if (is_own)
+				continue;
+
+			seq_printf(seq,
+				   " * %pM on % 5d % 4i.%03is (%04x)\n",
+				   backbone_gw->orig, backbone_gw->vid,
+				   secs, msecs, backbone_gw->crc);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+	return ret;
+}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 563cfbf..789cb73 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -27,6 +27,8 @@
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 			      struct batadv_orig_node *orig_node, int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+					     void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 				   struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, short vid,
-				bool is_bcast)
+				struct sk_buff *skb, short vid, bool is_bcast)
 {
 	return 0;
 }
@@ -66,6 +67,12 @@
 	return 0;
 }
 
+static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+							   void *offset)
+{
+	return 0;
+}
+
 static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
 						 uint8_t *orig)
 {
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 34fbb16..391d4fb 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -267,6 +267,15 @@
 	return single_open(file, batadv_bla_claim_table_seq_print_text,
 			   net_dev);
 }
+
+static int batadv_bla_backbone_table_open(struct inode *inode,
+					  struct file *file)
+{
+	struct net_device *net_dev = (struct net_device *)inode->i_private;
+	return single_open(file, batadv_bla_backbone_table_seq_print_text,
+			   net_dev);
+}
+
 #endif
 
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@
 			batadv_transtable_global_open);
 #ifdef CONFIG_BATMAN_ADV_BLA
 static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
+			batadv_bla_backbone_table_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
 			batadv_transtable_local_open);
@@ -316,6 +327,7 @@
 	&batadv_debuginfo_transtable_global,
 #ifdef CONFIG_BATMAN_ADV_BLA
 	&batadv_debuginfo_bla_claim_table,
+	&batadv_debuginfo_bla_backbone_table,
 #endif
 	&batadv_debuginfo_transtable_local,
 	&batadv_debuginfo_vis_data,
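The new bla_backbone_table entry follows the usual read-only debugfs recipe: a seq_file show routine plus single_open(), wired up here through the driver's BATADV_DEBUGINFO() macro. For reference, the bare pattern without the macro looks roughly like the sketch below; the names are placeholders and error handling is omitted.

	/* Generic read-only debugfs file via seq_file; roughly what the
	 * BATADV_DEBUGINFO() wrapper expands to. Names are placeholders.
	 */
	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *seq, void *offset)
	{
		seq_printf(seq, "hello from debugfs\n");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	/* registered from the driver's debugfs init, e.g.:
	 * debugfs_create_file("example", S_IRUGO, parent, priv, &example_fops);
	 */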
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc4..15d67ab 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@
 	struct batadv_gw_node *gw_node;
 
 	rcu_read_lock();
-	gw_node = rcu_dereference(bat_priv->curr_gw);
+	gw_node = rcu_dereference(bat_priv->gw.curr_gw);
 	if (!gw_node)
 		goto out;
 
@@ -91,23 +91,23 @@
 {
 	struct batadv_gw_node *curr_gw_node;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
 
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+	curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+	rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
 
 	if (curr_gw_node)
 		batadv_gw_node_free_ref(curr_gw_node);
 
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 }
 
 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-	atomic_set(&bat_priv->gw_reselect, 1);
+	atomic_set(&bat_priv->gw.reselect, 1);
 }
 
 static struct batadv_gw_node *
@@ -117,12 +117,17 @@
 	struct hlist_node *node;
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+	uint32_t gw_divisor;
 	uint8_t max_tq = 0;
 	int down, up;
+	uint8_t tq_avg;
 	struct batadv_orig_node *orig_node;
 
+	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+	gw_divisor *= 64;
+
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;
 
@@ -134,19 +139,19 @@
 		if (!atomic_inc_not_zero(&gw_node->refcount))
 			goto next;
 
+		tq_avg = router->tq_avg;
+
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
 		case 1: /* fast connection */
 			batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
 						    &down, &up);
 
-			tmp_gw_factor = (router->tq_avg * router->tq_avg *
-					 down * 100 * 100) /
-					 (BATADV_TQ_LOCAL_WINDOW_SIZE *
-					  BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+			tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+			tmp_gw_factor /= gw_divisor;
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (router->tq_avg > max_tq))) {
+			     (tq_avg > max_tq))) {
 				if (curr_gw)
 					batadv_gw_node_free_ref(curr_gw);
 				curr_gw = gw_node;
@@ -161,7 +166,7 @@
 			  *     soon as a better gateway appears which has
 			  *     $routing_class more tq points)
 			  */
-			if (router->tq_avg > max_tq) {
+			if (tq_avg > max_tq) {
 				if (curr_gw)
 					batadv_gw_node_free_ref(curr_gw);
 				curr_gw = gw_node;
@@ -170,8 +175,8 @@
 			break;
 		}
 
-		if (router->tq_avg > max_tq)
-			max_tq = router->tq_avg;
+		if (tq_avg > max_tq)
+			max_tq = tq_avg;
 
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
@@ -200,11 +205,11 @@
 	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
-		goto out;
-
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
+	if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
+		goto out;
+
 	next_gw = batadv_gw_get_best_gw_node(bat_priv);
 
 	if (curr_gw == next_gw)
@@ -321,9 +326,9 @@
 	gw_node->orig_node = orig_node;
 	atomic_set(&gw_node->refcount, 1);
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
+	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->orig_node != orig_node)
 			continue;
 
@@ -404,10 +409,10 @@
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
 
 	hlist_for_each_entry_safe(gw_node, node, node_tmp,
-				  &bat_priv->gw_list, list) {
+				  &bat_priv->gw.list, list) {
 		if (((!gw_node->deleted) ||
 		     (time_before(jiffies, gw_node->deleted + timeout))) &&
 		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@
 		batadv_gw_node_free_ref(gw_node);
 	}
 
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	/* gw_deselect() needs to acquire the gw_list_lock */
 	if (do_deselect)
@@ -496,7 +501,7 @@
 		   primary_if->net_dev->dev_addr, net_dev->name);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;
 
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 282bf6e..d112fd6 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -103,13 +103,14 @@
 {
 	struct batadv_vis_packet *vis_packet;
 	struct batadv_hard_iface *primary_if;
+	struct sk_buff *skb;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out;
 
-	vis_packet = (struct batadv_vis_packet *)
-				bat_priv->my_vis_info->skb_packet->data;
+	skb = bat_priv->vis.my_info->skb_packet;
+	vis_packet = (struct batadv_vis_packet *)skb->data;
 	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(vis_packet->sender_orig,
 	       primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
 	hard_iface->if_status = BATADV_IF_INACTIVE;
-	batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+	ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+	if (ret < 0) {
+		bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+		bat_priv->num_ifaces--;
+		hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+		goto err_dev;
+	}
 
 	hard_iface->batman_adv_ptype.type = ethertype;
 	hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 13c88b2..b4aa470 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,6 @@
 
 	batadv_iv_init();
 
-	/* the name should not be longer than 10 chars - see
-	 * http://lwn.net/Articles/23634/
-	 */
 	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
 	if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->tt_changes_list_lock);
-	spin_lock_init(&bat_priv->tt_req_list_lock);
-	spin_lock_init(&bat_priv->tt_roam_list_lock);
-	spin_lock_init(&bat_priv->tt_buff_lock);
-	spin_lock_init(&bat_priv->gw_list_lock);
-	spin_lock_init(&bat_priv->vis_hash_lock);
-	spin_lock_init(&bat_priv->vis_list_lock);
+	spin_lock_init(&bat_priv->tt.changes_list_lock);
+	spin_lock_init(&bat_priv->tt.req_list_lock);
+	spin_lock_init(&bat_priv->tt.roam_list_lock);
+	spin_lock_init(&bat_priv->tt.last_changeset_lock);
+	spin_lock_init(&bat_priv->gw.list_lock);
+	spin_lock_init(&bat_priv->vis.hash_lock);
+	spin_lock_init(&bat_priv->vis.list_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-	INIT_HLIST_HEAD(&bat_priv->gw_list);
-	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
-	INIT_LIST_HEAD(&bat_priv->tt_req_list);
-	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
+	INIT_HLIST_HEAD(&bat_priv->gw.list);
+	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+	INIT_LIST_HEAD(&bat_priv->tt.req_list);
+	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 
 	ret = batadv_originator_init(bat_priv);
 	if (ret < 0)
@@ -131,7 +128,7 @@
 	if (ret < 0)
 		goto err;
 
-	atomic_set(&bat_priv->gw_reselect, 0);
+	atomic_set(&bat_priv->gw.reselect, 0);
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
 	return 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5d8fa07..d57b746 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.3.0"
+#define BATADV_SOURCE_VERSION "2012.4.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
  * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
  */
 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64
-/* miliseconds we have to keep pending tt_req */
+/* milliseconds we have to keep pending tt_req */
 #define BATADV_TT_REQUEST_TIMEOUT 3000
 
 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
 #define BATADV_TT_OGM_APPEND_MAX 3
 
 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in
- * miliseconds
+ * milliseconds
  */
 #define BATADV_ROAMING_MAX_TIME 20000
 #define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@
 /* Append 'batman-adv: ' before kernel messages */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-/* all messages related to routing / flooding / broadcasting / etc */
-enum batadv_dbg_level {
-	BATADV_DBG_BATMAN = 1 << 0,
-	BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
-	BATADV_DBG_TT	  = 1 << 2, /* translation table operations */
-	BATADV_DBG_BLA    = 1 << 3, /* bridge loop avoidance */
-	BATADV_DBG_ALL    = 15,
-};
-
 /* Kernel headers */
 
 #include <linux/mutex.h>	/* mutex */
@@ -173,6 +165,15 @@
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
 
+/* all messages related to routing / flooding / broadcasting / etc */
+enum batadv_dbg_level {
+	BATADV_DBG_BATMAN = BIT(0),
+	BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
+	BATADV_DBG_TT	  = BIT(2), /* translation table operations */
+	BATADV_DBG_BLA    = BIT(3), /* bridge loop avoidance */
+	BATADV_DBG_ALL    = 15,
+};
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 8d3e55a..2d23a14 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -37,10 +37,10 @@
 #define BATADV_COMPAT_VERSION 14
 
 enum batadv_iv_flags {
-	BATADV_NOT_BEST_NEXT_HOP   = 1 << 3,
-	BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
-	BATADV_VIS_SERVER	   = 1 << 5,
-	BATADV_DIRECTLINK	   = 1 << 6,
+	BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
+	BATADV_PRIMARIES_FIRST_HOP = BIT(4),
+	BATADV_VIS_SERVER	   = BIT(5),
+	BATADV_DIRECTLINK	   = BIT(6),
 };
 
 /* ICMP message types */
@@ -60,8 +60,8 @@
 
 /* fragmentation defines */
 enum batadv_unicast_frag_flags {
-	BATADV_UNI_FRAG_HEAD	  = 1 << 0,
-	BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
+	BATADV_UNI_FRAG_HEAD	  = BIT(0),
+	BATADV_UNI_FRAG_LARGETAIL = BIT(1),
 };
 
 /* TT_QUERY subtypes */
@@ -74,26 +74,27 @@
 
 /* TT_QUERY flags */
 enum batadv_tt_query_flags {
-	BATADV_TT_FULL_TABLE = 1 << 2,
+	BATADV_TT_FULL_TABLE = BIT(2),
 };
 
 /* BATADV_TT_CLIENT flags.
- * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only
+ * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
+ * BIT(15) are used for local computation only
  */
 enum batadv_tt_client_flags {
-	BATADV_TT_CLIENT_DEL     = 1 << 0,
-	BATADV_TT_CLIENT_ROAM    = 1 << 1,
-	BATADV_TT_CLIENT_WIFI    = 1 << 2,
-	BATADV_TT_CLIENT_NOPURGE = 1 << 8,
-	BATADV_TT_CLIENT_NEW     = 1 << 9,
-	BATADV_TT_CLIENT_PENDING = 1 << 10,
+	BATADV_TT_CLIENT_DEL     = BIT(0),
+	BATADV_TT_CLIENT_ROAM    = BIT(1),
+	BATADV_TT_CLIENT_WIFI    = BIT(2),
+	BATADV_TT_CLIENT_TEMP	 = BIT(3),
+	BATADV_TT_CLIENT_NOPURGE = BIT(8),
+	BATADV_TT_CLIENT_NEW     = BIT(9),
+	BATADV_TT_CLIENT_PENDING = BIT(10),
 };
 
 /* claim frame types for the bridge loop avoidance */
 enum batadv_bla_claimframe {
-	BATADV_CLAIM_TYPE_ADD		= 0x00,
-	BATADV_CLAIM_TYPE_DEL		= 0x01,
+	BATADV_CLAIM_TYPE_CLAIM		= 0x00,
+	BATADV_CLAIM_TYPE_UNCLAIM	= 0x01,
 	BATADV_CLAIM_TYPE_ANNOUNCE	= 0x02,
 	BATADV_CLAIM_TYPE_REQUEST	= 0x03,
 };
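The flag conversions in packet.h are cosmetic: BIT(n) from the kernel headers expands to an unsigned 1UL << n, which reads better and sidesteps signed-shift pitfalls for high bit positions. A stand-alone equivalent is shown below, with BIT() defined locally since the kernel macro is not available in userspace.

	/* Stand-alone illustration of the BIT() flag style adopted above;
	 * BIT() is defined locally for the example.
	 */
	#include <stdio.h>

	#define BIT(n)	(1UL << (n))

	enum example_flags {
		EXAMPLE_FLAG_DEL     = BIT(0),	/* sent on the wire */
		EXAMPLE_FLAG_ROAM    = BIT(1),
		EXAMPLE_FLAG_NOPURGE = BIT(8),	/* local computation only */
	};

	int main(void)
	{
		unsigned long flags = EXAMPLE_FLAG_ROAM | EXAMPLE_FLAG_NOPURGE;

		if (flags & EXAMPLE_FLAG_ROAM)
			printf("roam flag set, flags=0x%lx\n", flags);
		return 0;
	}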
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bc2b88b..939fc01 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -579,32 +579,45 @@
 	return router;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
 {
-	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_tt_query_packet *tt_query;
-	uint16_t tt_size;
 	struct ethhdr *ethhdr;
-	char tt_flag;
-	size_t packet_size;
 
 	/* drop packet if it has not necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb,
-				    sizeof(struct batadv_tt_query_packet))))
-		goto out;
-
-	/* I could need to modify it */
-	if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-		goto out;
+	if (unlikely(!pskb_may_pull(skb, hdr_size)))
+		return -1;
 
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 	/* packet with unicast indication but broadcast recipient */
 	if (is_broadcast_ether_addr(ethhdr->h_dest))
-		goto out;
+		return -1;
 
 	/* packet with broadcast sender address */
 	if (is_broadcast_ether_addr(ethhdr->h_source))
+		return -1;
+
+	/* not for me */
+	if (!batadv_is_my_mac(ethhdr->h_dest))
+		return -1;
+
+	return 0;
+}
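
batadv_check_unicast_packet() gathers the checks every unicast receive handler needs: the buffer must hold at least the expected header, neither source nor destination may be a broadcast address, and the destination must be one of our own MAC addresses. A small userspace sketch of the same validation order, assuming an invented header layout and a made-up local MAC; it is illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eth_hdr {
	uint8_t dest[6];
	uint8_t source[6];
	uint16_t proto;
};

static const uint8_t my_mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

static bool is_broadcast(const uint8_t *addr)
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(addr, bcast, 6) == 0;
}

/* returns -1 if the frame must be dropped, 0 if it may be processed */
static int check_unicast(const uint8_t *buf, size_t len, size_t hdr_size)
{
	struct eth_hdr eth;

	/* drop the packet if it is smaller than the expected header */
	if (len < hdr_size || len < sizeof(eth))
		return -1;

	memcpy(&eth, buf, sizeof(eth));  /* copy out to avoid unaligned access */

	/* unicast indication but broadcast recipient or sender */
	if (is_broadcast(eth.dest) || is_broadcast(eth.source))
		return -1;

	/* not addressed to us */
	if (memcmp(eth.dest, my_mac, 6) != 0)
		return -1;

	return 0;
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	memcpy(frame, my_mac, 6);
	printf("verdict: %d\n", check_unicast(frame, sizeof(frame), 20));
	return 0;
}
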
+
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+{
+	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct batadv_tt_query_packet *tt_query;
+	uint16_t tt_size;
+	int hdr_size = sizeof(*tt_query);
+	char tt_flag;
+	size_t packet_size;
+
+	if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+		return NET_RX_DROP;
+
+	/* I could need to modify it */
+	if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
 		goto out;
 
 	tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@
 	 * been incremented yet. This flag will make me check all the incoming
 	 * packets for the correct destination.
 	 */
-	bat_priv->tt_poss_change = true;
+	bat_priv->tt.poss_change = true;
 
 	batadv_orig_node_free_ref(orig_node);
 out:
@@ -819,31 +832,6 @@
 	return NULL;
 }
 
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
-	struct ethhdr *ethhdr;
-
-	/* drop packet if it has not necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return -1;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with unicast indication but broadcast recipient */
-	if (is_broadcast_ether_addr(ethhdr->h_dest))
-		return -1;
-
-	/* packet with broadcast sender address */
-	if (is_broadcast_ether_addr(ethhdr->h_source))
-		return -1;
-
-	/* not for me */
-	if (!batadv_is_my_mac(ethhdr->h_dest))
-		return -1;
-
-	return 0;
-}
-
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if)
 {
@@ -947,8 +935,8 @@
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	if (batadv_is_my_mac(unicast_packet->dest)) {
-		tt_poss_change = bat_priv->tt_poss_change;
-		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+		tt_poss_change = bat_priv->tt.poss_change;
+		curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 	} else {
 		orig_node = batadv_orig_hash_find(bat_priv,
 						  unicast_packet->dest);
@@ -993,8 +981,7 @@
 		} else {
 			memcpy(unicast_packet->dest, orig_node->orig,
 			       ETH_ALEN);
-			curr_ttvn = (uint8_t)
-				atomic_read(&orig_node->last_ttvn);
+			curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 			batadv_orig_node_free_ref(orig_node);
 		}
 
@@ -1025,8 +1012,9 @@
 
 	/* packet for me */
 	if (batadv_is_my_mac(unicast_packet->dest)) {
-		batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
-				    hdr_size);
+		batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+				    NULL);
+
 		return NET_RX_SUCCESS;
 	}
 
@@ -1063,7 +1051,7 @@
 			return NET_RX_SUCCESS;
 
 		batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-				    sizeof(struct batadv_unicast_packet));
+				    sizeof(struct batadv_unicast_packet), NULL);
 		return NET_RX_SUCCESS;
 	}
 
@@ -1150,7 +1138,8 @@
 		goto out;
 
 	/* broadcast for me */
-	batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+	batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+			    orig_node);
 	ret = NET_RX_SUCCESS;
 	goto out;
 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3b4b2da..570a8bc 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -190,13 +190,13 @@
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
 	struct batadv_hard_iface *hard_iface;
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *delayed_work;
 	struct batadv_forw_packet *forw_packet;
 	struct sk_buff *skb1;
 	struct net_device *soft_iface;
 	struct batadv_priv *bat_priv;
 
+	delayed_work = container_of(work, struct delayed_work, work);
 	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
 				   delayed_work);
 	soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@
 
 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *delayed_work;
 	struct batadv_forw_packet *forw_packet;
 	struct batadv_priv *bat_priv;
 
+	delayed_work = container_of(work, struct delayed_work, work);
 	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
 				   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
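
Both hunks above only move the container_of() call out of the declaration; the underlying pattern, recovering the enclosing structure from a pointer to one of its embedded members, is unchanged. A standalone sketch of that pattern with a hand-rolled container_of so it builds outside the kernel (the structures are invented):

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel macro: subtract the member offset */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work {
	int pending;            /* stand-in for the real work struct */
};

struct forw_packet {
	unsigned int seqno;
	struct delayed_work delayed_work;
};

/* a work handler only receives the embedded member... */
static void work_handler(struct delayed_work *dw)
{
	/* ...and climbs back to the surrounding forw_packet */
	struct forw_packet *fp;

	fp = container_of(dw, struct forw_packet, delayed_work);
	printf("handling forw_packet with seqno %u\n", fp->seqno);
}

int main(void)
{
	struct forw_packet fp = { .seqno = 42 };

	work_handler(&fp.delayed_work);
	return 0;
}

batadv_tt_purge() further down applies the same trick twice in a row, going from the work item to the embedded tt sub-structure and from there to the surrounding batadv_priv.
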
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 109ea2a..7b683e0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -93,7 +93,14 @@
 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
-	return &bat_priv->stats;
+	struct net_device_stats *stats = &bat_priv->stats;
+
+	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+	return stats;
 }
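
Rather than updating the shared net_device_stats fields on every frame, the hot path now bumps lock-free per-CPU counters (allocated with __alloc_percpu) and batadv_interface_stats() folds them together only when user space asks for the numbers. A rough userspace analogue, with an array of per-thread slots standing in for the per-CPU allocation; all names are invented:

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4        /* stand-in for the number of CPUs */

enum counter { CNT_TX, CNT_TX_BYTES, CNT_RX, CNT_RX_BYTES, CNT_NUM };

/* one row per "CPU"; writers only ever touch their own row */
static uint64_t counters[NSLOTS][CNT_NUM];

static void inc_counter(int slot, enum counter idx, uint64_t amount)
{
	counters[slot][idx] += amount;
}

/* the slow path: fold all rows together when stats are read */
static uint64_t sum_counter(enum counter idx)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < NSLOTS; i++)
		sum += counters[i][idx];
	return sum;
}

int main(void)
{
	inc_counter(0, CNT_TX, 1);
	inc_counter(0, CNT_TX_BYTES, 1500);
	inc_counter(3, CNT_TX, 1);
	inc_counter(3, CNT_TX_BYTES, 60);

	printf("tx=%llu tx_bytes=%llu\n",
	       (unsigned long long)sum_counter(CNT_TX),
	       (unsigned long long)sum_counter(CNT_TX_BYTES));
	return 0;
}
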
 
 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
@@ -142,6 +149,7 @@
 	int data_len = skb->len, ret;
 	short vid __maybe_unused = -1;
 	bool do_bcast = false;
+	uint32_t seqno;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -223,8 +231,8 @@
 		       primary_if->net_dev->dev_addr, ETH_ALEN);
 
 		/* set broadcast sequence number */
-		bcast_packet->seqno =
-			htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+		bcast_packet->seqno = htonl(seqno);
 
 		batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 
@@ -246,14 +254,14 @@
 			goto dropped_freed;
 	}
 
-	bat_priv->stats.tx_packets++;
-	bat_priv->stats.tx_bytes += data_len;
+	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
 	goto end;
 
 dropped:
 	kfree_skb(skb);
 dropped_freed:
-	bat_priv->stats.tx_dropped++;
+	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
@@ -262,7 +270,7 @@
 
 void batadv_interface_rx(struct net_device *soft_iface,
 			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-			 int hdr_size)
+			 int hdr_size, struct batadv_orig_node *orig_node)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
@@ -308,11 +316,16 @@
 
 	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
 
-	bat_priv->stats.rx_packets++;
-	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+			   skb->len + ETH_HLEN);
 
 	soft_iface->last_rx = jiffies;
 
+	if (orig_node)
+		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+						     ethhdr->h_source);
+
 	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 		goto dropped;
 
@@ -379,15 +392,22 @@
 	if (!soft_iface)
 		goto out;
 
+	bat_priv = netdev_priv(soft_iface);
+
+	/* batadv_interface_stats() needs to be available as soon as
+	 * register_netdevice() has been called
+	 */
+	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+	if (!bat_priv->bat_counters)
+		goto free_soft_iface;
+
 	ret = register_netdevice(soft_iface);
 	if (ret < 0) {
 		pr_err("Unable to register the batman interface '%s': %i\n",
 		       name, ret);
-		goto free_soft_iface;
+		goto free_bat_counters;
 	}
 
-	bat_priv = netdev_priv(soft_iface);
-
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
 	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -405,29 +425,26 @@
 
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 	atomic_set(&bat_priv->bcast_seqno, 1);
-	atomic_set(&bat_priv->ttvn, 0);
-	atomic_set(&bat_priv->tt_local_changes, 0);
-	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
-	atomic_set(&bat_priv->bla_num_requests, 0);
-
-	bat_priv->tt_buff = NULL;
-	bat_priv->tt_buff_len = 0;
-	bat_priv->tt_poss_change = false;
+	atomic_set(&bat_priv->tt.vn, 0);
+	atomic_set(&bat_priv->tt.local_changes, 0);
+	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+	atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+	bat_priv->tt.last_changeset = NULL;
+	bat_priv->tt.last_changeset_len = 0;
+	bat_priv->tt.poss_change = false;
 
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
 
-	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
-	if (!bat_priv->bat_counters)
-		goto unreg_soft_iface;
-
 	ret = batadv_algo_select(bat_priv, batadv_routing_algo);
 	if (ret < 0)
-		goto free_bat_counters;
+		goto unreg_soft_iface;
 
 	ret = batadv_sysfs_add_meshif(soft_iface);
 	if (ret < 0)
-		goto free_bat_counters;
+		goto unreg_soft_iface;
 
 	ret = batadv_debugfs_add_meshif(soft_iface);
 	if (ret < 0)
@@ -443,12 +460,13 @@
 	batadv_debugfs_del_meshif(soft_iface);
 unreg_sysfs:
 	batadv_sysfs_del_meshif(soft_iface);
-free_bat_counters:
-	free_percpu(bat_priv->bat_counters);
 unreg_soft_iface:
+	free_percpu(bat_priv->bat_counters);
 	unregister_netdevice(soft_iface);
 	return NULL;
 
+free_bat_counters:
+	free_percpu(bat_priv->bat_counters);
 free_soft_iface:
 	free_netdev(soft_iface);
 out:
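
The relabelled error path keeps the usual kernel shape: each goto target releases exactly what was acquired before the failing step, so reading the labels bottom-up retraces the setup in reverse. A compact standalone illustration of that structure with made-up resources:

#include <stdio.h>
#include <stdlib.h>

struct iface {
	void *counters;
	void *netdev;
};

static struct iface *iface_create(void)
{
	struct iface *ifc;

	ifc = calloc(1, sizeof(*ifc));
	if (!ifc)
		goto out;

	/* counters must exist before the device is "registered" */
	ifc->counters = calloc(16, sizeof(long));
	if (!ifc->counters)
		goto free_iface;

	ifc->netdev = malloc(128);          /* pretend registration */
	if (!ifc->netdev)
		goto free_counters;

	return ifc;

free_counters:
	free(ifc->counters);
free_iface:
	free(ifc);
out:
	return NULL;
}

int main(void)
{
	struct iface *ifc = iface_create();

	printf("create %s\n", ifc ? "succeeded" : "failed");
	if (ifc) {
		free(ifc->netdev);
		free(ifc->counters);
		free(ifc);
	}
	return 0;
}
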
@@ -518,6 +536,11 @@
 static const struct {
 	const char name[ETH_GSTRING_LEN];
 } batadv_counters_strings[] = {
+	{ "tx" },
+	{ "tx_bytes" },
+	{ "tx_dropped" },
+	{ "rx" },
+	{ "rx_bytes" },
 	{ "forward" },
 	{ "forward_bytes" },
 	{ "mgmt_tx" },
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 852c683..07a08fe 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -21,8 +21,9 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
-void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
-			 struct batadv_hard_iface *recv_if, int hdr_size);
+void batadv_interface_rx(struct net_device *soft_iface,
+			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+			 int hdr_size, struct batadv_orig_node *orig_node);
 struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a438f4b..112edd3 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -34,6 +34,10 @@
 static void batadv_tt_purge(struct work_struct *work);
 static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+				 struct batadv_orig_node *orig_node,
+				 const unsigned char *addr,
+				 const char *message, bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@
 
 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
 			   msecs_to_jiffies(5000));
 }
 
@@ -88,7 +92,7 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-	tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
 	if (tt_common_entry)
 		tt_local_entry = container_of(tt_common_entry,
 					      struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-	tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
 	if (tt_common_entry)
 		tt_global_entry = container_of(tt_common_entry,
 					       struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
+	if (!atomic_dec_and_test(&orig_entry->refcount))
+		return;
 	/* to avoid race conditions, immediately decrease the tt counter */
 	atomic_dec(&orig_entry->orig_node->tt_size);
 	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@
 	del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
 	/* check for ADD+DEL or DEL+ADD events */
-	spin_lock_bh(&bat_priv->tt_changes_list_lock);
-	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+	spin_lock_bh(&bat_priv->tt.changes_list_lock);
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
 				 list) {
 		if (!batadv_compare_eth(entry->change.addr, addr))
 			continue;
@@ -197,20 +203,21 @@
 del:
 		list_del(&entry->list);
 		kfree(entry);
+		kfree(tt_change_node);
 		event_removed = true;
 		goto unlock;
 	}
 
 	/* track the change in the OGMinterval list */
-	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+	list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
 
 unlock:
-	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
 	if (event_removed)
-		atomic_dec(&bat_priv->tt_local_changes);
+		atomic_dec(&bat_priv->tt.local_changes);
 	else
-		atomic_inc(&bat_priv->tt_local_changes);
+		atomic_inc(&bat_priv->tt.local_changes);
 }
 
 int batadv_tt_len(int changes_num)
@@ -220,12 +227,12 @@
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
-	if (bat_priv->tt_local_hash)
+	if (bat_priv->tt.local_hash)
 		return 0;
 
-	bat_priv->tt_local_hash = batadv_hash_new(1024);
+	bat_priv->tt.local_hash = batadv_hash_new(1024);
 
-	if (!bat_priv->tt_local_hash)
+	if (!bat_priv->tt.local_hash)
 		return -ENOMEM;
 
 	return 0;
@@ -257,7 +264,7 @@
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-		   (uint8_t)atomic_read(&bat_priv->ttvn));
+		   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
 	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
 	tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -265,6 +272,7 @@
 		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
 	atomic_set(&tt_local_entry->common.refcount, 2);
 	tt_local_entry->last_seen = jiffies;
+	tt_local_entry->common.added_at = tt_local_entry->last_seen;
 
 	/* the batman interface mac address should never be purged */
 	if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -276,7 +284,7 @@
 	 */
 	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-	hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
 				     batadv_choose_orig,
 				     &tt_local_entry->common,
 				     &tt_local_entry->common.hash_entry);
@@ -347,7 +355,7 @@
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 
 	req_len = min_packet_len;
-	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+	req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
 
 	/* if we have too many changes for one packet don't send any
 	 * and wait for the tt table request which will be fragmented
@@ -380,10 +388,10 @@
 	if (new_len > 0)
 		tot_changes = new_len / batadv_tt_len(1);
 
-	spin_lock_bh(&bat_priv->tt_changes_list_lock);
-	atomic_set(&bat_priv->tt_local_changes, 0);
+	spin_lock_bh(&bat_priv->tt.changes_list_lock);
+	atomic_set(&bat_priv->tt.local_changes, 0);
 
-	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
 				 list) {
 		if (count < tot_changes) {
 			memcpy(tt_buff + batadv_tt_len(count),
@@ -393,25 +401,25 @@
 		list_del(&entry->list);
 		kfree(entry);
 	}
-	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
 	/* Keep the buffer for possible tt_request */
-	spin_lock_bh(&bat_priv->tt_buff_lock);
-	kfree(bat_priv->tt_buff);
-	bat_priv->tt_buff_len = 0;
-	bat_priv->tt_buff = NULL;
+	spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+	kfree(bat_priv->tt.last_changeset);
+	bat_priv->tt.last_changeset_len = 0;
+	bat_priv->tt.last_changeset = NULL;
 	/* check whether this new OGM has no changes due to size problems */
 	if (new_len > 0) {
 		/* if kmalloc() fails we will reply with the full table
 		 * instead of providing the diff
 		 */
-		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
-		if (bat_priv->tt_buff) {
-			memcpy(bat_priv->tt_buff, tt_buff, new_len);
-			bat_priv->tt_buff_len = new_len;
+		bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+		if (bat_priv->tt.last_changeset) {
+			memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+			bat_priv->tt.last_changeset_len = new_len;
 		}
 	}
-	spin_unlock_bh(&bat_priv->tt_buff_lock);
+	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
 	return count;
 }
@@ -420,7 +428,7 @@
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
@@ -445,7 +453,7 @@
 
 	seq_printf(seq,
 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -543,7 +551,7 @@
 
 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
 {
-	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	uint32_t i;
@@ -569,10 +577,10 @@
 	struct hlist_head *head;
 	uint32_t i;
 
-	if (!bat_priv->tt_local_hash)
+	if (!bat_priv->tt.local_hash)
 		return;
 
-	hash = bat_priv->tt_local_hash;
+	hash = bat_priv->tt.local_hash;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -592,17 +600,17 @@
 
 	batadv_hash_destroy(hash);
 
-	bat_priv->tt_local_hash = NULL;
+	bat_priv->tt.local_hash = NULL;
 }
 
 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
-	if (bat_priv->tt_global_hash)
+	if (bat_priv->tt.global_hash)
 		return 0;
 
-	bat_priv->tt_global_hash = batadv_hash_new(1024);
+	bat_priv->tt.global_hash = batadv_hash_new(1024);
 
-	if (!bat_priv->tt_global_hash)
+	if (!bat_priv->tt.global_hash)
 		return -ENOMEM;
 
 	return 0;
@@ -612,62 +620,99 @@
 {
 	struct batadv_tt_change_node *entry, *safe;
 
-	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	spin_lock_bh(&bat_priv->tt.changes_list_lock);
 
-	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
 				 list) {
 		list_del(&entry->list);
 		kfree(entry);
 	}
 
-	atomic_set(&bat_priv->tt_local_changes, 0);
-	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+	atomic_set(&bat_priv->tt.local_changes, 0);
+	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+}
+
+/* retrieves the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * returns it with an increased refcounter, NULL if not found
+ */
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+				 const struct batadv_orig_node *orig_node)
+{
+	struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
+	const struct hlist_head *head;
+	struct hlist_node *node;
+
+	rcu_read_lock();
+	head = &entry->orig_list;
+	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+		if (tmp_orig_entry->orig_node != orig_node)
+			continue;
+		if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
+			continue;
+
+		orig_entry = tmp_orig_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return orig_entry;
 }
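
The new lookup helper hands back its result with the reference count already raised, using the increment-only-if-nonzero idiom (atomic_inc_not_zero) so that an entry whose last reference is being dropped is never returned, and batadv_tt_orig_list_entry_free_ref() is the matching release that frees on the final decrement. A simplified sketch of that acquire/release pairing using C11 atomics in place of the kernel's atomic_t; single-threaded and illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int key;
	atomic_int refcount;
};

/* acquire: take a reference only if the object is still alive */
static bool get_entry(struct entry *e)
{
	int old = atomic_load(&e->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&e->refcount, &old, old + 1))
			return true;        /* reference taken */
	}
	return false;                       /* already on its way out */
}

/* release: the last reference frees the object */
static void put_entry(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcount, 1) == 1)
		free(e);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	e->key = 7;
	atomic_init(&e->refcount, 1);       /* the list's own reference */

	if (get_entry(e)) {                 /* a lookup succeeded */
		printf("found entry %d\n", e->key);
		put_entry(e);               /* lookup reference dropped */
	}
	put_entry(e);                       /* list drops its reference, entry freed */
	return 0;
}
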
 
 /* find out if an orig_node is already in the list of a tt_global_entry.
- * returns 1 if found, 0 otherwise
+ * returns true if found, false otherwise
  */
 static bool
 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 				const struct batadv_orig_node *orig_node)
 {
-	struct batadv_tt_orig_list_entry *tmp_orig_entry;
-	const struct hlist_head *head;
-	struct hlist_node *node;
+	struct batadv_tt_orig_list_entry *orig_entry;
 	bool found = false;
 
-	rcu_read_lock();
-	head = &entry->orig_list;
-	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
-		if (tmp_orig_entry->orig_node == orig_node) {
-			found = true;
-			break;
-		}
+	orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+	if (orig_entry) {
+		found = true;
+		batadv_tt_orig_list_entry_free_ref(orig_entry);
 	}
-	rcu_read_unlock();
+
 	return found;
 }
 
 static void
-batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 				struct batadv_orig_node *orig_node, int ttvn)
 {
 	struct batadv_tt_orig_list_entry *orig_entry;
 
+	orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+	if (orig_entry) {
+		/* refresh the ttvn: the current value could be a bogus one that
+		 * was added during a "temporary client detection"
+		 */
+		orig_entry->ttvn = ttvn;
+		goto out;
+	}
+
 	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
 	if (!orig_entry)
-		return;
+		goto out;
 
 	INIT_HLIST_NODE(&orig_entry->list);
 	atomic_inc(&orig_node->refcount);
 	atomic_inc(&orig_node->tt_size);
 	orig_entry->orig_node = orig_node;
 	orig_entry->ttvn = ttvn;
+	atomic_set(&orig_entry->refcount, 2);
 
-	spin_lock_bh(&tt_global_entry->list_lock);
+	spin_lock_bh(&tt_global->list_lock);
 	hlist_add_head_rcu(&orig_entry->list,
-			   &tt_global_entry->orig_list);
-	spin_unlock_bh(&tt_global_entry->list_lock);
+			   &tt_global->orig_list);
+	spin_unlock_bh(&tt_global->list_lock);
+out:
+	if (orig_entry)
+		batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
 
 /* caller must hold orig_node refcount */
@@ -694,11 +739,12 @@
 		common->flags = flags;
 		tt_global_entry->roam_at = 0;
 		atomic_set(&common->refcount, 2);
+		common->added_at = jiffies;
 
 		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
 		spin_lock_init(&tt_global_entry->list_lock);
 
-		hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+		hash_added = batadv_hash_add(bat_priv->tt.global_hash,
 					     batadv_compare_tt,
 					     batadv_choose_orig, common,
 					     &common->hash_entry);
@@ -708,11 +754,20 @@
 			batadv_tt_global_entry_free_ref(tt_global_entry);
 			goto out_remove;
 		}
-
-		batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
-						ttvn);
 	} else {
-		/* there is already a global entry, use this one. */
+		/* If there is already a global entry, we can use this one for
+		 * our processing.
+		 * But if we are trying to add a temporary client we can exit
+		 * directly because the temporary information should never
+		 * override any already known client state (whatever it is)
+		 */
+		if (flags & BATADV_TT_CLIENT_TEMP)
+			goto out;
+
+		/* if the client was temporarily added before receiving the first
+		 * OGM announcing it, we have to clear the TEMP flag
+		 */
+		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
 
 		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
 		 * one originator left in the list and we previously received a
@@ -726,12 +781,9 @@
 			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
 			tt_global_entry->roam_at = 0;
 		}
-
-		if (!batadv_tt_global_entry_has_orig(tt_global_entry,
-						     orig_node))
-			batadv_tt_global_add_orig_entry(tt_global_entry,
-							orig_node, ttvn);
 	}
+	/* add the new orig_entry (if needed) or update it */
+	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new global tt entry: %pM (via %pM)\n",
@@ -770,11 +822,12 @@
 	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
 		flags = tt_common_entry->flags;
 		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+		seq_printf(seq,	" * %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
 			   tt_global_entry->common.addr, orig_entry->ttvn,
 			   orig_entry->orig_node->orig, last_ttvn,
 			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
 	}
 }
 
@@ -782,7 +835,7 @@
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_hard_iface *primary_if;
@@ -883,7 +936,7 @@
 		   "Deleting global tt entry %pM: %s\n",
 		   tt_global_entry->common.addr, message);
 
-	batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
 			   batadv_choose_orig, tt_global_entry->common.addr);
 	batadv_tt_global_entry_free_ref(tt_global_entry);
 
@@ -994,7 +1047,7 @@
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_tt_common_entry *tt_common_entry;
 	uint32_t i;
-	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct hlist_node *node, *safe;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1029,49 +1082,63 @@
 	orig_node->tt_initialised = false;
 }
 
-static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
-					     struct hlist_head *head)
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+				      char **msg)
 {
-	struct batadv_tt_common_entry *tt_common_entry;
-	struct batadv_tt_global_entry *tt_global_entry;
-	struct hlist_node *node, *node_tmp;
+	bool purge = false;
+	unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+	unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
 
-	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
-				  hash_entry) {
-		tt_global_entry = container_of(tt_common_entry,
-					       struct batadv_tt_global_entry,
-					       common);
-		if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
-			continue;
-		if (!batadv_has_timed_out(tt_global_entry->roam_at,
-					  BATADV_TT_CLIENT_ROAM_TIMEOUT))
-			continue;
-
-		batadv_dbg(BATADV_DBG_TT, bat_priv,
-			   "Deleting global tt entry (%pM): Roaming timeout\n",
-			   tt_global_entry->common.addr);
-
-		hlist_del_rcu(node);
-		batadv_tt_global_entry_free_ref(tt_global_entry);
+	if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+	    batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+		purge = true;
+		*msg = "Roaming timeout\n";
 	}
+
+	if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+	    batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+		purge = true;
+		*msg = "Temporary client timeout\n";
+	}
+
+	return purge;
 }
 
-static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
-	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct hlist_head *head;
+	struct hlist_node *node, *node_tmp;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	uint32_t i;
+	char *msg = NULL;
+	struct batadv_tt_common_entry *tt_common;
+	struct batadv_tt_global_entry *tt_global;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		batadv_tt_global_roam_purge_list(bat_priv, head);
+		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+					  hash_entry) {
+			tt_global = container_of(tt_common,
+						 struct batadv_tt_global_entry,
+						 common);
+
+			if (!batadv_tt_global_to_purge(tt_global, &msg))
+				continue;
+
+			batadv_dbg(BATADV_DBG_TT, bat_priv,
+				   "Deleting global tt entry (%pM): %s\n",
+				   tt_global->common.addr, msg);
+
+			hlist_del_rcu(node);
+
+			batadv_tt_global_entry_free_ref(tt_global);
+		}
 		spin_unlock_bh(list_lock);
 	}
-
 }
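
batadv_tt_global_purge() now handles both expiry cases in a single hash walk: an entry records when it was added or when it started roaming, and the periodic worker removes whatever has outlived the timeout that applies to its flags. A tiny standalone sketch of that expiry decision, with wall-clock seconds instead of jiffies and purely illustrative timeout values:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define ROAM_TIMEOUT 600    /* seconds; illustrative values only */
#define TEMP_TIMEOUT 60

struct global_entry {
	const char *addr;
	bool roaming;
	bool temporary;
	time_t added_at;
	time_t roam_at;
};

static bool has_timed_out(time_t stamp, time_t timeout, time_t now)
{
	return now - stamp > timeout;
}

/* decide whether an entry is due for purging, and why */
static bool to_purge(const struct global_entry *e, time_t now, const char **msg)
{
	if (e->roaming && has_timed_out(e->roam_at, ROAM_TIMEOUT, now)) {
		*msg = "roaming timeout";
		return true;
	}
	if (e->temporary && has_timed_out(e->added_at, TEMP_TIMEOUT, now)) {
		*msg = "temporary client timeout";
		return true;
	}
	return false;
}

int main(void)
{
	time_t now = time(NULL);
	struct global_entry e = {
		.addr = "aa:bb:cc:dd:ee:ff",
		.temporary = true,
		.added_at = now - 120,      /* added two minutes ago */
	};
	const char *msg = NULL;

	if (to_purge(&e, now, &msg))
		printf("deleting %s: %s\n", e.addr, msg);
	return 0;
}
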
 
 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1084,10 +1151,10 @@
 	struct hlist_head *head;
 	uint32_t i;
 
-	if (!bat_priv->tt_global_hash)
+	if (!bat_priv->tt.global_hash)
 		return;
 
-	hash = bat_priv->tt_global_hash;
+	hash = bat_priv->tt.global_hash;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1107,7 +1174,7 @@
 
 	batadv_hash_destroy(hash);
 
-	bat_priv->tt_global_hash = NULL;
+	bat_priv->tt.global_hash = NULL;
 }
 
 static bool
@@ -1186,7 +1253,7 @@
 				     struct batadv_orig_node *orig_node)
 {
 	uint16_t total = 0, total_one;
-	struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_node *node;
@@ -1209,6 +1276,12 @@
 			 */
 			if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
 				continue;
+			/* Temporary clients have not been announced yet, so
+			 * they have to be skipped while computing the global
+			 * crc
+			 */
+			if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+				continue;
 
 			/* find out if this global entry is announced by this
 			 * originator
@@ -1233,7 +1306,7 @@
 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
 	uint16_t total = 0, total_one;
-	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -1266,14 +1339,14 @@
 {
 	struct batadv_tt_req_node *node, *safe;
 
-	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		list_del(&node->list);
 		kfree(node);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1303,15 +1376,15 @@
 {
 	struct batadv_tt_req_node *node, *safe;
 
-	spin_lock_bh(&bat_priv->tt_req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+	spin_lock_bh(&bat_priv->tt.req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (batadv_has_timed_out(node->issued_at,
 					 BATADV_TT_REQUEST_TIMEOUT)) {
 			list_del(&node->list);
 			kfree(node);
 		}
 	}
-	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
@@ -1323,8 +1396,8 @@
 {
 	struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
-	spin_lock_bh(&bat_priv->tt_req_list_lock);
-	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+	spin_lock_bh(&bat_priv->tt.req_list_lock);
+	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
 		if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
 		    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
 					  BATADV_TT_REQUEST_TIMEOUT))
@@ -1338,9 +1411,9 @@
 	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
 	tt_req_node->issued_at = jiffies;
 
-	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+	list_add(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
-	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 	return tt_req_node;
 }
 
@@ -1362,7 +1435,8 @@
 	const struct batadv_tt_global_entry *tt_global_entry;
 	const struct batadv_orig_node *orig_node = data_ptr;
 
-	if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
+	if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+	    tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
 		return 0;
 
 	tt_global_entry = container_of(tt_common_entry,
@@ -1506,9 +1580,9 @@
 	if (ret)
 		kfree_skb(skb);
 	if (ret && tt_req_node) {
-		spin_lock_bh(&bat_priv->tt_req_list_lock);
+		spin_lock_bh(&bat_priv->tt.req_list_lock);
 		list_del(&tt_req_node->list);
-		spin_unlock_bh(&bat_priv->tt_req_list_lock);
+		spin_unlock_bh(&bat_priv->tt.req_list_lock);
 		kfree(tt_req_node);
 	}
 	return ret;
@@ -1529,6 +1603,7 @@
 	uint16_t tt_len, tt_tot;
 	struct sk_buff *skb = NULL;
 	struct batadv_tt_query_packet *tt_response;
+	uint8_t *packet_pos;
 	size_t len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1582,8 +1657,8 @@
 			goto unlock;
 
 		skb_reserve(skb, ETH_HLEN);
-		tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-								       len);
+		packet_pos = skb_put(skb, len);
+		tt_response = (struct batadv_tt_query_packet *)packet_pos;
 		tt_response->ttvn = req_ttvn;
 		tt_response->tt_data = htons(tt_tot);
 
@@ -1599,7 +1674,7 @@
 		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
 		skb = batadv_tt_response_fill_table(tt_len, ttvn,
-						    bat_priv->tt_global_hash,
+						    bat_priv->tt.global_hash,
 						    primary_if,
 						    batadv_tt_global_valid,
 						    req_dst_orig_node);
@@ -1662,6 +1737,7 @@
 	uint16_t tt_len, tt_tot;
 	struct sk_buff *skb = NULL;
 	struct batadv_tt_query_packet *tt_response;
+	uint8_t *packet_pos;
 	size_t len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1670,7 +1746,7 @@
 		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
-	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+	my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 	req_ttvn = tt_request->ttvn;
 
 	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1689,7 +1765,7 @@
 	 * is too big send the whole local translation table
 	 */
 	if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
-	    !bat_priv->tt_buff)
+	    !bat_priv->tt.last_changeset)
 		full_table = true;
 	else
 		full_table = false;
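
The decision above is the heart of the incremental table exchange: the cached last_changeset is enough only when the requester did not ask for the full table, the version numbers line up, and the diff is still around; otherwise the complete local table is sent. A one-function sketch of that condition, with invented names and the semantics as read from the hunk above:

#include <stdbool.h>
#include <stdio.h>

/* answer with the full table unless the cached diff can satisfy the request */
static bool need_full_table(bool full_requested, unsigned char my_ttvn,
			    unsigned char req_ttvn, bool have_last_diff)
{
	return full_requested || my_ttvn != req_ttvn || !have_last_diff;
}

int main(void)
{
	printf("%d\n", need_full_table(false, 7, 7, true));   /* 0: diff is enough */
	printf("%d\n", need_full_table(false, 9, 7, true));   /* 1: versions differ */
	printf("%d\n", need_full_table(false, 7, 7, false));  /* 1: diff not kept */
	return 0;
}
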
@@ -1698,8 +1774,8 @@
 	 * I'll send only one packet with as much TT entries as I can
 	 */
 	if (!full_table) {
-		spin_lock_bh(&bat_priv->tt_buff_lock);
-		tt_len = bat_priv->tt_buff_len;
+		spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+		tt_len = bat_priv->tt.last_changeset_len;
 		tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
 		len = sizeof(*tt_response) + tt_len;
@@ -1708,22 +1784,22 @@
 			goto unlock;
 
 		skb_reserve(skb, ETH_HLEN);
-		tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-								       len);
+		packet_pos = skb_put(skb, len);
+		tt_response = (struct batadv_tt_query_packet *)packet_pos;
 		tt_response->ttvn = req_ttvn;
 		tt_response->tt_data = htons(tt_tot);
 
 		tt_buff = skb->data + sizeof(*tt_response);
-		memcpy(tt_buff, bat_priv->tt_buff,
-		       bat_priv->tt_buff_len);
-		spin_unlock_bh(&bat_priv->tt_buff_lock);
+		memcpy(tt_buff, bat_priv->tt.last_changeset,
+		       bat_priv->tt.last_changeset_len);
+		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 	} else {
-		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+		tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
 		tt_len *= sizeof(struct batadv_tt_change);
-		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+		ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
 		skb = batadv_tt_response_fill_table(tt_len, ttvn,
-						    bat_priv->tt_local_hash,
+						    bat_priv->tt.local_hash,
 						    primary_if,
 						    batadv_tt_local_valid_entry,
 						    NULL);
@@ -1755,7 +1831,7 @@
 	goto out;
 
 unlock:
-	spin_unlock_bh(&bat_priv->tt_buff_lock);
+	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
 	if (orig_node)
 		batadv_orig_node_free_ref(orig_node);
@@ -1908,14 +1984,14 @@
 	}
 
 	/* Delete the tt_req_node from pending tt_requests list */
-	spin_lock_bh(&bat_priv->tt_req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+	spin_lock_bh(&bat_priv->tt.req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (!batadv_compare_eth(node->addr, tt_response->src))
 			continue;
 		list_del(&node->list);
 		kfree(node);
 	}
-	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
 	/* Recalculate the CRC for this orig_node and store it */
 	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1949,22 +2025,22 @@
 {
 	struct batadv_tt_roam_node *node, *safe;
 
-	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	spin_lock_bh(&bat_priv->tt.roam_list_lock);
 
-	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+	list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
 		list_del(&node->list);
 		kfree(node);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
 	struct batadv_tt_roam_node *node, *safe;
 
-	spin_lock_bh(&bat_priv->tt_roam_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+	spin_lock_bh(&bat_priv->tt.roam_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
 		if (!batadv_has_timed_out(node->first_time,
 					  BATADV_ROAMING_MAX_TIME))
 			continue;
@@ -1972,7 +2048,7 @@
 		list_del(&node->list);
 		kfree(node);
 	}
-	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 /* This function checks whether the client already reached the
@@ -1987,11 +2063,11 @@
 	struct batadv_tt_roam_node *tt_roam_node;
 	bool ret = false;
 
-	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	spin_lock_bh(&bat_priv->tt.roam_list_lock);
 	/* The new tt_req will be issued only if I'm not waiting for a
 	 * reply from the same orig_node yet
 	 */
-	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+	list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
 		if (!batadv_compare_eth(tt_roam_node->addr, client))
 			continue;
 
@@ -2016,12 +2092,12 @@
 			   BATADV_ROAMING_MAX_COUNT - 1);
 		memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
-		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+		list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
 		ret = true;
 	}
 
 unlock:
-	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 	return ret;
 }
 
@@ -2085,13 +2161,15 @@
 static void batadv_tt_purge(struct work_struct *work)
 {
 	struct delayed_work *delayed_work;
+	struct batadv_priv_tt *priv_tt;
 	struct batadv_priv *bat_priv;
 
 	delayed_work = container_of(work, struct delayed_work, work);
-	bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+	priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+	bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
 	batadv_tt_local_purge(bat_priv);
-	batadv_tt_global_roam_purge(bat_priv);
+	batadv_tt_global_purge(bat_priv);
 	batadv_tt_req_purge(bat_priv);
 	batadv_tt_roam_purge(bat_priv);
 
@@ -2100,7 +2178,7 @@
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
-	cancel_delayed_work_sync(&bat_priv->tt_work);
+	cancel_delayed_work_sync(&bat_priv->tt.work);
 
 	batadv_tt_local_table_free(bat_priv);
 	batadv_tt_global_table_free(bat_priv);
@@ -2108,7 +2186,7 @@
 	batadv_tt_changes_list_free(bat_priv);
 	batadv_tt_roam_list_free(bat_priv);
 
-	kfree(bat_priv->tt_buff);
+	kfree(bat_priv->tt.last_changeset);
 }
 
 /* This function will enable or disable the specified flags for all the entries
@@ -2152,7 +2230,7 @@
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 {
-	struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_local_entry *tt_local;
 	struct hlist_node *node, *node_tmp;
@@ -2177,7 +2255,7 @@
 				   "Deleting local tt entry (%pM): pending\n",
 				   tt_common->addr);
 
-			atomic_dec(&bat_priv->num_local_tt);
+			atomic_dec(&bat_priv->tt.local_entry_num);
 			hlist_del_rcu(node);
 			tt_local = container_of(tt_common,
 						struct batadv_tt_local_entry,
@@ -2195,26 +2273,26 @@
 {
 	uint16_t changed_num = 0;
 
-	if (atomic_read(&bat_priv->tt_local_changes) < 1)
+	if (atomic_read(&bat_priv->tt.local_changes) < 1)
 		return -ENOENT;
 
-	changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+	changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
 					  BATADV_TT_CLIENT_NEW, false);
 
 	/* all reset entries have to be counted as local entries */
-	atomic_add(changed_num, &bat_priv->num_local_tt);
+	atomic_add(changed_num, &bat_priv->tt.local_entry_num);
 	batadv_tt_local_purge_pending_clients(bat_priv);
-	bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+	bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
 
 	/* Increment the TTVN only once per OGM interval */
-	atomic_inc(&bat_priv->ttvn);
+	atomic_inc(&bat_priv->tt.vn);
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Local changes committed, updating to ttvn %u\n",
-		   (uint8_t)atomic_read(&bat_priv->ttvn));
-	bat_priv->tt_poss_change = false;
+		   (uint8_t)atomic_read(&bat_priv->tt.vn));
+	bat_priv->tt.poss_change = false;
 
 	/* reset the sending counter */
-	atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 
 	return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
 					   packet_buff_len, packet_min_len);
@@ -2234,7 +2312,7 @@
 
 	/* if the changes have been sent often enough */
 	if ((tt_num_changes < 0) &&
-	    (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+	    (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
 		batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
 					      packet_min_len, packet_min_len);
 		tt_num_changes = 0;
@@ -2365,3 +2443,22 @@
 out:
 	return ret;
 }
+
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig_node,
+					  const unsigned char *addr)
+{
+	bool ret = false;
+
+	if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+				  BATADV_TT_CLIENT_TEMP,
+				  atomic_read(&orig_node->last_ttvn)))
+		goto out;
+
+	batadv_dbg(BATADV_DBG_TT, bat_priv,
+		   "Added temporary global client (addr: %pM orig: %pM)\n",
+		   addr, orig_node->orig);
+	ret = true;
+out:
+	return ret;
+}
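
batadv_tt_add_temporary_global_entry() closes the window between a client's first data frame and the first OGM that announces it: the receive path provisionally learns the client behind orig_node with the BATADV_TT_CLIENT_TEMP flag, the entry is ignored while the global CRC is computed, the flag is cleared once a real announcement arrives, and the purge worker drops the guess if no announcement shows up in time. A condensed, purely illustrative state sketch of that lifecycle:

#include <stdio.h>

enum tt_state { TT_UNKNOWN, TT_TEMPORARY, TT_CONFIRMED, TT_EXPIRED };

/* data frame seen from an unannounced client: learn it provisionally */
static enum tt_state on_payload(enum tt_state s)
{
	return (s == TT_UNKNOWN) ? TT_TEMPORARY : s;
}

/* OGM announces the client: the temporary entry becomes a real one */
static enum tt_state on_announcement(enum tt_state s)
{
	return (s == TT_TEMPORARY || s == TT_UNKNOWN) ? TT_CONFIRMED : s;
}

/* purge worker fires before any announcement: drop the guess */
static enum tt_state on_temp_timeout(enum tt_state s)
{
	return (s == TT_TEMPORARY) ? TT_EXPIRED : s;
}

int main(void)
{
	enum tt_state s = TT_UNKNOWN;

	s = on_payload(s);          /* temporary, skipped in the CRC */
	s = on_announcement(s);     /* confirmed, TEMP flag cleared */
	(void)on_temp_timeout(s);   /* no effect once confirmed */
	printf("state after announcement: %d\n", s);
	return 0;
}
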
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ffa8735..811fffd 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -59,6 +59,8 @@
 			  int packet_min_len);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 					uint8_t *addr);
-
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig_node,
+					  const unsigned char *addr);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 12635fd..2ed82ca 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -145,6 +145,11 @@
 #endif
 
 enum batadv_counters {
+	BATADV_CNT_TX,
+	BATADV_CNT_TX_BYTES,
+	BATADV_CNT_TX_DROPPED,
+	BATADV_CNT_RX,
+	BATADV_CNT_RX_BYTES,
 	BATADV_CNT_FORWARD,
 	BATADV_CNT_FORWARD_BYTES,
 	BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@
 	BATADV_CNT_NUM,
 };
 
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ * @vn: translation table version number
+ * @local_changes: changes registered in an originator interval
+ * @poss_change: Detect an ongoing roaming phase. If true, then this node
+ *  received a roaming_adv and has to inspect every packet directed to it to
+ *  check whether it still is the true destination or not. This flag will be
+ *  reset to false as soon as this node's ttvn is increased
+ * @changes_list: tracks tt local changes within an originator interval
+ * @req_list: list of pending tt_requests
+ * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ */
+struct batadv_priv_tt {
+	atomic_t vn;
+	atomic_t ogm_append_cnt;
+	atomic_t local_changes;
+	bool poss_change;
+	struct list_head changes_list;
+	struct batadv_hashtable *local_hash;
+	struct batadv_hashtable *global_hash;
+	struct list_head req_list;
+	struct list_head roam_list;
+	spinlock_t changes_list_lock; /* protects changes */
+	spinlock_t req_list_lock; /* protects req_list */
+	spinlock_t roam_list_lock; /* protects roam_list */
+	atomic_t local_entry_num;
+	uint16_t local_crc;
+	unsigned char *last_changeset;
+	int16_t last_changeset_len;
+	spinlock_t last_changeset_lock; /* protects last_changeset */
+	struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct batadv_priv_bla {
+	atomic_t num_requests; /* number of bla requests in flight */
+	struct batadv_hashtable *claim_hash;
+	struct batadv_hashtable *backbone_hash;
+	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+	int bcast_duplist_curr;
+	struct batadv_bla_claim_dst claim_dest;
+	struct delayed_work work;
+};
+#endif
+
+struct batadv_priv_gw {
+	struct hlist_head list;
+	spinlock_t list_lock; /* protects gw_list and curr_gw */
+	struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+	atomic_t reselect;
+};
+
+struct batadv_priv_vis {
+	struct list_head send_list;
+	struct batadv_hashtable *hash;
+	spinlock_t hash_lock; /* protects hash */
+	spinlock_t list_lock; /* protects info::recv_list */
+	struct delayed_work work;
+	struct batadv_vis_info *my_info;
+};
+
 struct batadv_priv {
 	atomic_t mesh_state;
 	struct net_device_stats stats;
@@ -179,64 +245,24 @@
 	atomic_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
-	atomic_t ttvn; /* translation table version number */
-	atomic_t tt_ogm_append_cnt;
-	atomic_t tt_local_changes; /* changes registered in a OGM interval */
-	atomic_t bla_num_requests; /* number of bla requests in flight */
-	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
-	 * If true, then I received a Roaming_adv and I have to inspect every
-	 * packet directed to me to check whether I am still the true
-	 * destination or not. This flag will be reset to false as soon as I
-	 * increase my TTVN
-	 */
-	bool tt_poss_change;
 	char num_ifaces;
 	struct batadv_debug_log *debug_log;
 	struct kobject *mesh_obj;
 	struct dentry *debug_dir;
 	struct hlist_head forw_bat_list;
 	struct hlist_head forw_bcast_list;
-	struct hlist_head gw_list;
-	struct list_head tt_changes_list; /* tracks changes in a OGM int */
-	struct list_head vis_send_list;
 	struct batadv_hashtable *orig_hash;
-	struct batadv_hashtable *tt_local_hash;
-	struct batadv_hashtable *tt_global_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-	struct batadv_hashtable *claim_hash;
-	struct batadv_hashtable *backbone_hash;
-#endif
-	struct list_head tt_req_list; /* list of pending tt_requests */
-	struct list_head tt_roam_list;
-	struct batadv_hashtable *vis_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
-	int bcast_duplist_curr;
-	struct batadv_bla_claim_dst claim_dest;
-#endif
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
-	spinlock_t tt_changes_list_lock; /* protects tt_changes */
-	spinlock_t tt_req_list_lock; /* protects tt_req_list */
-	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
-	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
-	spinlock_t vis_hash_lock; /* protects vis_hash */
-	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-	atomic_t num_local_tt;
-	/* Checksum of the local table, recomputed before sending a new OGM */
-	uint16_t tt_crc;
-	unsigned char *tt_buff;
-	int16_t tt_buff_len;
-	spinlock_t tt_buff_lock; /* protects tt_buff */
-	struct delayed_work tt_work;
 	struct delayed_work orig_work;
-	struct delayed_work vis_work;
-	struct delayed_work bla_work;
-	struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
-	atomic_t gw_reselect;
 	struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
-	struct batadv_vis_info *my_vis_info;
 	struct batadv_algo_ops *bat_algo_ops;
+#ifdef CONFIG_BATMAN_ADV_BLA
+	struct batadv_priv_bla bla;
+#endif
+	struct batadv_priv_gw gw;
+	struct batadv_priv_tt tt;
+	struct batadv_priv_vis vis;
 };
 
 struct batadv_socket_client {
@@ -258,6 +284,7 @@
 	uint8_t addr[ETH_ALEN];
 	struct hlist_node hash_entry;
 	uint16_t flags;
+	unsigned long added_at;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -277,6 +304,7 @@
 struct batadv_tt_orig_list_entry {
 	struct batadv_orig_node *orig_node;
 	uint8_t ttvn;
+	atomic_t refcount;
 	struct rcu_head rcu;
 	struct hlist_node list;
 };
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 0016464..f397232 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,6 +39,7 @@
 	struct batadv_unicast_packet *unicast_packet;
 	int hdr_len = sizeof(*unicast_packet);
 	int uni_diff = sizeof(*up) - hdr_len;
+	uint8_t *packet_pos;
 
 	up = (struct batadv_unicast_frag_packet *)skb->data;
 	/* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@
 	kfree_skb(tmp_skb);
 
 	memmove(skb->data + uni_diff, skb->data, hdr_len);
-	unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
-								  uni_diff);
+	packet_pos = skb_pull(skb, uni_diff);
+	unicast_packet = (struct batadv_unicast_packet *)packet_pos;
 	unicast_packet->header.packet_type = BATADV_UNICAST;
 
 	return skb;
@@ -121,6 +122,7 @@
 {
 	struct batadv_frag_packet_list_entry *tfp;
 	struct batadv_unicast_frag_packet *tmp_up = NULL;
+	int is_head_tmp, is_head;
 	uint16_t search_seqno;
 
 	if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@
 	else
 		search_seqno = ntohs(up->seqno)-1;
 
+	is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
+
 	list_for_each_entry(tfp, head, list) {
 
 		if (!tfp->skb)
@@ -139,9 +143,8 @@
 		tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
 
 		if (tfp->seqno == search_seqno) {
-
-			if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
-			    (up->flags & BATADV_UNI_FRAG_HEAD))
+			is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
+			if (is_head_tmp != is_head)
 				return tfp;
 			else
 				goto mov_tail;
@@ -334,8 +337,7 @@
 	/* copy the destination for faster routing */
 	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
 	/* set the destination tt version number */
-	unicast_packet->ttvn =
-		(uint8_t)atomic_read(&orig_node->last_ttvn);
+	unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 
 	/* inform the destination node that we are still missing a correct route
 	 * for this client. The destination will receive this packet and will
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 2a2ea06..5abd145 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -41,13 +41,13 @@
 	bat_priv = info->bat_priv;
 
 	list_del_init(&info->send_list);
-	spin_lock_bh(&bat_priv->vis_list_lock);
+	spin_lock_bh(&bat_priv->vis.list_lock);
 	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
 		list_del(&entry->list);
 		kfree(entry);
 	}
 
-	spin_unlock_bh(&bat_priv->vis_list_lock);
+	spin_unlock_bh(&bat_priv->vis.list_lock);
 	kfree_skb(info->skb_packet);
 	kfree(info);
 }
@@ -94,7 +94,7 @@
 static struct batadv_vis_info *
 batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-	struct batadv_hashtable *hash = bat_priv->vis_hash;
+	struct batadv_hashtable *hash = bat_priv->vis.hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@
 	struct hlist_head *head;
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	struct batadv_hashtable *hash = bat_priv->vis_hash;
+	struct batadv_hashtable *hash = bat_priv->vis.hash;
 	uint32_t i;
 	int ret = 0;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@
 	if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
 		goto out;
 
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 		batadv_vis_seq_print_text_bucket(seq, head);
 	}
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 
 out:
 	if (primary_if)
@@ -285,7 +285,7 @@
 {
 	if (list_empty(&info->send_list)) {
 		kref_get(&info->refcount);
-		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
+		list_add_tail(&info->send_list, &bat_priv->vis.send_list);
 	}
 }
 
@@ -311,9 +311,9 @@
 		return;
 
 	memcpy(entry->mac, mac, ETH_ALEN);
-	spin_lock_bh(&bat_priv->vis_list_lock);
+	spin_lock_bh(&bat_priv->vis.list_lock);
 	list_add_tail(&entry->list, recv_list);
-	spin_unlock_bh(&bat_priv->vis_list_lock);
+	spin_unlock_bh(&bat_priv->vis.list_lock);
 }
 
 /* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@
 {
 	const struct batadv_recvlist_node *entry;
 
-	spin_lock_bh(&bat_priv->vis_list_lock);
+	spin_lock_bh(&bat_priv->vis.list_lock);
 	list_for_each_entry(entry, recv_list, list) {
 		if (batadv_compare_eth(entry->mac, mac)) {
-			spin_unlock_bh(&bat_priv->vis_list_lock);
+			spin_unlock_bh(&bat_priv->vis.list_lock);
 			return 1;
 		}
 	}
-	spin_unlock_bh(&bat_priv->vis_list_lock);
+	spin_unlock_bh(&bat_priv->vis.list_lock);
 	return 0;
 }
 
@@ -354,7 +354,7 @@
 
 	*is_new = 0;
 	/* sanity check */
-	if (!bat_priv->vis_hash)
+	if (!bat_priv->vis.hash)
 		return NULL;
 
 	/* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@
 			}
 		}
 		/* remove old entry */
-		batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+		batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
 				   batadv_vis_info_choose, old_info);
 		batadv_send_list_del(old_info);
 		kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@
 	batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 
 	/* try to add it */
-	hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+	hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
 				     batadv_vis_info_choose, info,
 				     &info->hash_entry);
 	if (hash_added != 0) {
@@ -449,7 +449,7 @@
 
 	make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
 
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
 				 &is_new, make_broadcast);
 	if (!info)
@@ -461,7 +461,7 @@
 	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
 		batadv_send_list_add(bat_priv, info);
 end:
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@
 	    batadv_is_my_mac(vis_packet->target_orig))
 		are_target = 1;
 
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 	info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
 				 &is_new, are_target);
 
@@ -505,7 +505,7 @@
 	}
 
 end:
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	struct batadv_neigh_node *router;
-	struct batadv_vis_info *info = bat_priv->my_vis_info;
+	struct batadv_vis_info *info = bat_priv->vis.my_info;
 	struct batadv_vis_packet *packet;
 	struct batadv_vis_info_entry *entry;
 	struct batadv_tt_common_entry *tt_common_entry;
+	uint8_t *packet_pos;
 	int best_tq = -1;
 	uint32_t i;
 
@@ -618,8 +619,8 @@
 				goto next;
 
 			/* fill one entry into buffer. */
-			entry = (struct batadv_vis_info_entry *)
-				      skb_put(info->skb_packet, sizeof(*entry));
+			packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+			entry = (struct batadv_vis_info_entry *)packet_pos;
 			memcpy(entry->src,
 			       router->if_incoming->net_dev->dev_addr,
 			       ETH_ALEN);
@@ -636,7 +637,7 @@
 		rcu_read_unlock();
 	}
 
-	hash = bat_priv->tt_local_hash;
+	hash = bat_priv->tt.local_hash;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -644,9 +645,8 @@
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(tt_common_entry, node, head,
 					 hash_entry) {
-			entry = (struct batadv_vis_info_entry *)
-					skb_put(info->skb_packet,
-						sizeof(*entry));
+			packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+			entry = (struct batadv_vis_info_entry *)packet_pos;
 			memset(entry->src, 0, ETH_ALEN);
 			memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
 			entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@
 static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
 	uint32_t i;
-	struct batadv_hashtable *hash = bat_priv->vis_hash;
+	struct batadv_hashtable *hash = bat_priv->vis.hash;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	struct batadv_vis_info *info;
@@ -682,7 +682,7 @@
 		hlist_for_each_entry_safe(info, node, node_tmp,
 					  head, hash_entry) {
 			/* never purge own data. */
-			if (info == bat_priv->my_vis_info)
+			if (info == bat_priv->vis.my_info)
 				continue;
 
 			if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@
 /* called from timer; send (and maybe generate) vis packet. */
 static void batadv_send_vis_packets(struct work_struct *work)
 {
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *delayed_work;
 	struct batadv_priv *bat_priv;
+	struct batadv_priv_vis *priv_vis;
 	struct batadv_vis_info *info;
 
-	bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	delayed_work = container_of(work, struct delayed_work, work);
+	priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
+	bat_priv = container_of(priv_vis, struct batadv_priv, vis);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 	batadv_purge_vis_packets(bat_priv);
 
 	if (batadv_generate_vis_packet(bat_priv) == 0) {
 		/* schedule if generation was successful */
-		batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
+		batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
 	}
 
-	while (!list_empty(&bat_priv->vis_send_list)) {
-		info = list_first_entry(&bat_priv->vis_send_list,
+	while (!list_empty(&bat_priv->vis.send_list)) {
+		info = list_first_entry(&bat_priv->vis.send_list,
 					typeof(*info), send_list);
 
 		kref_get(&info->refcount);
-		spin_unlock_bh(&bat_priv->vis_hash_lock);
+		spin_unlock_bh(&bat_priv->vis.hash_lock);
 
 		batadv_send_vis_packet(bat_priv, info);
 
-		spin_lock_bh(&bat_priv->vis_hash_lock);
+		spin_lock_bh(&bat_priv->vis.hash_lock);
 		batadv_send_list_del(info);
 		kref_put(&info->refcount, batadv_free_info);
 	}
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 	batadv_start_vis_timer(bat_priv);
 }
 
@@ -856,37 +858,37 @@
 	unsigned long first_seen;
 	struct sk_buff *tmp_skb;
 
-	if (bat_priv->vis_hash)
+	if (bat_priv->vis.hash)
 		return 0;
 
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 
-	bat_priv->vis_hash = batadv_hash_new(256);
-	if (!bat_priv->vis_hash) {
+	bat_priv->vis.hash = batadv_hash_new(256);
+	if (!bat_priv->vis.hash) {
 		pr_err("Can't initialize vis_hash\n");
 		goto err;
 	}
 
-	bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-	if (!bat_priv->my_vis_info)
+	bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+	if (!bat_priv->vis.my_info)
 		goto err;
 
 	len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
-	if (!bat_priv->my_vis_info->skb_packet)
+	bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+	if (!bat_priv->vis.my_info->skb_packet)
 		goto free_info;
 
-	skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
-	tmp_skb = bat_priv->my_vis_info->skb_packet;
+	skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+	tmp_skb = bat_priv->vis.my_info->skb_packet;
 	packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
 	/* prefill the vis info */
 	first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-	bat_priv->my_vis_info->first_seen = first_seen;
-	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
-	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
-	kref_init(&bat_priv->my_vis_info->refcount);
-	bat_priv->my_vis_info->bat_priv = bat_priv;
+	bat_priv->vis.my_info->first_seen = first_seen;
+	INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
+	INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
+	kref_init(&bat_priv->vis.my_info->refcount);
+	bat_priv->vis.my_info->bat_priv = bat_priv;
 	packet->header.version = BATADV_COMPAT_VERSION;
 	packet->header.packet_type = BATADV_VIS;
 	packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@
 	packet->reserved = 0;
 	packet->entries = 0;
 
-	INIT_LIST_HEAD(&bat_priv->vis_send_list);
+	INIT_LIST_HEAD(&bat_priv->vis.send_list);
 
-	hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+	hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
 				     batadv_vis_info_choose,
-				     bat_priv->my_vis_info,
-				     &bat_priv->my_vis_info->hash_entry);
+				     bat_priv->vis.my_info,
+				     &bat_priv->vis.my_info->hash_entry);
 	if (hash_added != 0) {
 		pr_err("Can't add own vis packet into hash\n");
 		/* not in hash, need to remove it manually. */
-		kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
+		kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
 		goto err;
 	}
 
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 	batadv_start_vis_timer(bat_priv);
 	return 0;
 
 free_info:
-	kfree(bat_priv->my_vis_info);
-	bat_priv->my_vis_info = NULL;
+	kfree(bat_priv->vis.my_info);
+	bat_priv->vis.my_info = NULL;
 err:
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 	batadv_vis_quit(bat_priv);
 	return -ENOMEM;
 }
@@ -933,23 +935,23 @@
 /* shutdown vis-server */
 void batadv_vis_quit(struct batadv_priv *bat_priv)
 {
-	if (!bat_priv->vis_hash)
+	if (!bat_priv->vis.hash)
 		return;
 
-	cancel_delayed_work_sync(&bat_priv->vis_work);
+	cancel_delayed_work_sync(&bat_priv->vis.work);
 
-	spin_lock_bh(&bat_priv->vis_hash_lock);
+	spin_lock_bh(&bat_priv->vis.hash_lock);
 	/* properly remove, kill timers ... */
-	batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
-	bat_priv->vis_hash = NULL;
-	bat_priv->my_vis_info = NULL;
-	spin_unlock_bh(&bat_priv->vis_hash_lock);
+	batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
+	bat_priv->vis.hash = NULL;
+	bat_priv->vis.my_info = NULL;
+	spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* schedule packets for (re)transmission */
 static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+	INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
 			   msecs_to_jiffies(BATADV_VIS_INTERVAL));
 }
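
The reworked batadv_send_vis_packets() above recovers bat_priv by chaining
container_of() through the embedded delayed_work and the new vis sub-struct,
instead of keeping the work item directly in struct batadv_priv. A minimal
userspace sketch of that recovery technique, using invented demo_* names:

    #include <stddef.h>
    #include <stdio.h>

    /* same idea as the kernel macro: recover the enclosing struct from a member */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_work { int pending; };

    struct demo_vis {                       /* plays the role of batadv_priv_vis */
            struct demo_work work;
            int packets;
    };

    struct demo_priv {                      /* plays the role of batadv_priv */
            int ifindex;
            struct demo_vis vis;
    };

    /* a work handler only receives the innermost member it was queued with */
    static void demo_handler(struct demo_work *w)
    {
            struct demo_vis *vis = container_of(w, struct demo_vis, work);
            struct demo_priv *priv = container_of(vis, struct demo_priv, vis);

            printf("ifindex=%d packets=%d\n", priv->ifindex, vis->packets);
    }

    int main(void)
    {
            struct demo_priv priv = { .ifindex = 3, .vis = { .packets = 7 } };

            demo_handler(&priv.vis.work);
            return 0;
    }
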
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 84e716e..873282f 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -20,7 +20,7 @@
 #ifndef _NET_BATMAN_ADV_VIS_H_
 #define _NET_BATMAN_ADV_VIS_H_
 
-/* timeout of vis packets in miliseconds */
+/* timeout of vis packets in milliseconds */
 #define BATADV_VIS_TIMEOUT		200000
 
 int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 4ff0bf3..0760d1f 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -316,7 +316,7 @@
 static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
 			       struct a2mp_cmd *hdr)
 {
-	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+	BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
 
 	skb_pull(skb, le16_to_cpu(hdr->len));
 	return 0;
@@ -325,17 +325,19 @@
 /* Handle A2MP signalling */
 static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-	struct a2mp_cmd *hdr = (void *) skb->data;
+	struct a2mp_cmd *hdr;
 	struct amp_mgr *mgr = chan->data;
 	int err = 0;
 
 	amp_mgr_get(mgr);
 
 	while (skb->len >= sizeof(*hdr)) {
-		struct a2mp_cmd *hdr = (void *) skb->data;
-		u16 len = le16_to_cpu(hdr->len);
+		u16 len;
 
-		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+		hdr = (void *) skb->data;
+		len = le16_to_cpu(hdr->len);
+
+		BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
 
 		skb_pull(skb, sizeof(*hdr));
 
@@ -393,7 +395,9 @@
 
 	if (err) {
 		struct a2mp_cmd_rej rej;
+
 		rej.reason = __constant_cpu_to_le16(0);
+		hdr = (void *) skb->data;
 
 		BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
 
@@ -412,7 +416,7 @@
 
 static void a2mp_chan_close_cb(struct l2cap_chan *chan)
 {
-	l2cap_chan_destroy(chan);
+	l2cap_chan_put(chan);
 }
 
 static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
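
The a2mp_chan_recv_cb() change above re-reads the header on every pass and
only dereferences it while skb->len is still at least sizeof(*hdr), so a
short or malformed command can no longer walk past the buffer. A userspace
sketch of the same bounded walk over length-prefixed records (demo_hdr and
the byte layout are invented; the length is assumed to already be in host
order):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_hdr {
            uint8_t  code;
            uint8_t  ident;
            uint16_t len;                      /* payload length on the wire */
    } __attribute__((packed));

    /* walk a buffer of {header, payload} records without reading past the end */
    static void demo_walk(const uint8_t *buf, size_t total)
    {
            while (total >= sizeof(struct demo_hdr)) {
                    struct demo_hdr hdr;
                    uint16_t len;

                    memcpy(&hdr, buf, sizeof(hdr));    /* re-read header each pass */
                    len = hdr.len;                     /* assumes host byte order */

                    buf   += sizeof(hdr);
                    total -= sizeof(hdr);

                    if (len > total) {                 /* truncated record: stop */
                            fprintf(stderr, "short payload, dropping rest\n");
                            return;
                    }

                    printf("code 0x%2.2x id %u len %u\n", hdr.code, hdr.ident, len);

                    buf   += len;                      /* skip the payload */
                    total -= len;
            }
    }

    int main(void)
    {
            const uint8_t stream[] = { 0x01, 0x01, 0x02, 0x00, 0xaa, 0xbb,
                                       0x02, 0x02, 0x00, 0x00 };

            demo_walk(stream, sizeof(stream));
            return 0;
    }
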
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f7db579..58f9762 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -28,6 +28,7 @@
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <linux/proc_fs.h>
 
 #define VERSION "2.16"
 
@@ -532,6 +533,146 @@
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+	struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(seq->private->l->lock)
+{
+	struct bt_seq_state *s = seq->private;
+	struct bt_sock_list *l = s->l;
+
+	read_lock(&l->lock);
+	return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bt_seq_state *s = seq->private;
+	struct bt_sock_list *l = s->l;
+
+	return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+	__releases(seq->private->l->lock)
+{
+	struct bt_seq_state *s = seq->private;
+	struct bt_sock_list *l = s->l;
+
+	read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+	struct sock *sk;
+	struct bt_sock *bt;
+	struct bt_seq_state *s = seq->private;
+	struct bt_sock_list *l = s->l;
+	bdaddr_t src_baswapped, dst_baswapped;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+
+		if (l->custom_seq_show) {
+			seq_putc(seq, ' ');
+			l->custom_seq_show(seq, v);
+		}
+
+		seq_putc(seq, '\n');
+	} else {
+		sk = sk_entry(v);
+		bt = bt_sk(sk);
+		baswap(&src_baswapped, &bt->src);
+		baswap(&dst_baswapped, &bt->dst);
+
+		seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
+			   sk,
+			   atomic_read(&sk->sk_refcnt),
+			   sk_rmem_alloc_get(sk),
+			   sk_wmem_alloc_get(sk),
+			   sock_i_uid(sk),
+			   sock_i_ino(sk),
+			   &src_baswapped,
+			   &dst_baswapped,
+			   bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+		if (l->custom_seq_show) {
+			seq_putc(seq, ' ');
+			l->custom_seq_show(seq, v);
+		}
+
+		seq_putc(seq, '\n');
+	}
+	return 0;
+}
+
+static const struct seq_operations bt_seq_ops = {
+	.start = bt_seq_start,
+	.next  = bt_seq_next,
+	.stop  = bt_seq_stop,
+	.show  = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+	struct bt_sock_list *sk_list;
+	struct bt_seq_state *s;
+
+	sk_list = PDE(inode)->data;
+	s = __seq_open_private(file, &bt_seq_ops,
+			       sizeof(struct bt_seq_state));
+	if (s == NULL)
+		return -ENOMEM;
+
+	s->l = sk_list;
+	return 0;
+}
+
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+		   struct bt_sock_list *sk_list,
+		   int (*seq_show)(struct seq_file *, void *))
+{
+	struct proc_dir_entry *pde;
+
+	sk_list->custom_seq_show = seq_show;
+
+	sk_list->fops.owner     = module;
+	sk_list->fops.open      = bt_seq_open;
+	sk_list->fops.read      = seq_read;
+	sk_list->fops.llseek    = seq_lseek;
+	sk_list->fops.release   = seq_release_private;
+
+	pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	pde->data = sk_list;
+
+	return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+	proc_net_remove(net, name);
+}
+#else
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+		   struct bt_sock_list *sk_list,
+		   int (*seq_show)(struct seq_file *, void *))
+{
+	return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
+
 static struct net_proto_family bt_sock_family_ops = {
 	.owner	= THIS_MODULE,
 	.family	= PF_BLUETOOTH,
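
The bt_seq_* functions added above follow the usual seq_file contract:
start() takes the list lock and yields SEQ_START_TOKEN for position 0,
show() prints either the header row or one socket entry, next() advances,
and stop() drops the lock. A compact userspace analogue of that iterator
shape, with invented demo_* names and no locking:

    #include <stdio.h>

    #define START_TOKEN ((void *)1)            /* mirrors SEQ_START_TOKEN */

    struct demo_entry { const char *name; int refs; };

    static struct demo_entry table[] = {
            { "l2cap", 2 }, { "rfcomm", 1 },
    };
    static const int table_len = 2;

    /* start(): would take the list lock in the kernel; returns the header
     * token for pos 0 so the header row is printed exactly once per open */
    static void *demo_start(long *pos)
    {
            if (*pos == 0)
                    return START_TOKEN;
            return (*pos - 1) < table_len ? &table[*pos - 1] : NULL;
    }

    static void *demo_next(void *v, long *pos)
    {
            (*pos)++;
            return (*pos - 1) < table_len ? &table[*pos - 1] : NULL;
    }

    static void demo_show(void *v)
    {
            if (v == START_TOKEN) {
                    puts("name   refs");
                    return;
            }
            printf("%-6s %d\n", ((struct demo_entry *)v)->name,
                   ((struct demo_entry *)v)->refs);
    }

    int main(void)
    {
            long pos = 0;
            void *v;

            for (v = demo_start(&pos); v; v = demo_next(v, &pos))
                    demo_show(v);
            return 0;                          /* stop() would drop the lock */
    }
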
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5e5f5b4..5b6cc0b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -29,6 +29,10 @@
 
 #include "bnep.h"
 
+static struct bt_sock_list bnep_sk_list = {
+	.lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
 static int bnep_sock_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -38,6 +42,8 @@
 	if (!sk)
 		return 0;
 
+	bt_sock_unlink(&bnep_sk_list, sk);
+
 	sock_orphan(sk);
 	sock_put(sk);
 	return 0;
@@ -204,6 +210,7 @@
 	sk->sk_protocol = protocol;
 	sk->sk_state	= BT_OPEN;
 
+	bt_sock_link(&bnep_sk_list, sk);
 	return 0;
 }
 
@@ -222,19 +229,30 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("Can't register BNEP socket");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create BNEP proc file");
+		bt_sock_unregister(BTPROTO_BNEP);
+		goto error;
+	}
+
+	BT_INFO("BNEP socket layer initialized");
 
 	return 0;
 
 error:
-	BT_ERR("Can't register BNEP socket");
 	proto_unregister(&bnep_proto);
 	return err;
 }
 
 void __exit bnep_sock_cleanup(void)
 {
+	bt_procfs_cleanup(&init_net, "bnep");
 	if (bt_sock_unregister(BTPROTO_BNEP) < 0)
 		BT_ERR("Can't unregister BNEP socket");
 
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 311668d..d5cacef 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -42,6 +42,10 @@
 
 #include "cmtp.h"
 
+static struct bt_sock_list cmtp_sk_list = {
+	.lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
 static int cmtp_sock_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -51,6 +55,8 @@
 	if (!sk)
 		return 0;
 
+	bt_sock_unlink(&cmtp_sk_list, sk);
+
 	sock_orphan(sk);
 	sock_put(sk);
 
@@ -214,6 +220,8 @@
 	sk->sk_protocol = protocol;
 	sk->sk_state    = BT_OPEN;
 
+	bt_sock_link(&cmtp_sk_list, sk);
+
 	return 0;
 }
 
@@ -232,19 +240,30 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("Can't register CMTP socket");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create CMTP proc file");
+		bt_sock_unregister(BTPROTO_CMTP);
+		goto error;
+	}
+
+	BT_INFO("CMTP socket layer initialized");
 
 	return 0;
 
 error:
-	BT_ERR("Can't register CMTP socket");
 	proto_unregister(&cmtp_proto);
 	return err;
 }
 
 void cmtp_cleanup_sockets(void)
 {
+	bt_procfs_cleanup(&init_net, "cmtp");
 	if (bt_sock_unregister(BTPROTO_CMTP) < 0)
 		BT_ERR("Can't unregister CMTP socket");
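
Both the BNEP and CMTP init paths above register the protocol, then the
socket family, then the proc file, and on a proc failure they must undo the
socket family registration themselves (naming the protocol that was actually
registered) before jumping to the shared error label, which only undoes the
proto registration. A stub sketch of that unwind order, with demo_*
stand-ins for the real registration calls:

    #include <stdio.h>

    /* stand-ins for proto_register/bt_sock_register/bt_procfs_init */
    static int reg_proto(void)         { puts("proto registered");  return 0; }
    static int reg_socket_family(void) { puts("family registered"); return 0; }
    static int reg_procfs(void)        { puts("procfs created");    return -1; }

    static void unreg_proto(void)         { puts("proto unregistered"); }
    static void unreg_socket_family(void) { puts("family unregistered"); }

    static int demo_init(void)
    {
            int err = reg_proto();

            if (err)
                    return err;

            err = reg_socket_family();
            if (err) {
                    fprintf(stderr, "can't register socket family\n");
                    goto error;              /* only the proto needs undoing */
            }

            err = reg_procfs();
            if (err) {
                    fprintf(stderr, "can't create proc file\n");
                    unreg_socket_family();   /* undo the step that succeeded */
                    goto error;              /* then share the common tail */
            }

            return 0;

    error:
            unreg_proto();
            return err;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }
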
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d4de5db..fa974a1 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -696,7 +696,8 @@
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+		    mgmt_valid_hdev(hdev)) {
 			hci_dev_lock(hdev);
 			mgmt_powered(hdev, 1);
 			hci_dev_unlock(hdev);
@@ -797,7 +798,8 @@
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+	    mgmt_valid_hdev(hdev)) {
 		hci_dev_lock(hdev);
 		mgmt_powered(hdev, 0);
 		hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 41ff978..4fd2cf3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -513,7 +513,7 @@
 	if (hdev->features[3] & LMP_RSSI_INQ)
 		events[4] |= 0x02; /* Inquiry Result with RSSI */
 
-	if (hdev->features[5] & LMP_SNIFF_SUBR)
+	if (lmp_sniffsubr_capable(hdev))
 		events[5] |= 0x20; /* Sniff Subrating */
 
 	if (hdev->features[5] & LMP_PAUSE_ENC)
@@ -522,13 +522,13 @@
 	if (hdev->features[6] & LMP_EXT_INQ)
 		events[5] |= 0x40; /* Extended Inquiry Result */
 
-	if (hdev->features[6] & LMP_NO_FLUSH)
+	if (lmp_no_flush_capable(hdev))
 		events[7] |= 0x01; /* Enhanced Flush Complete */
 
 	if (hdev->features[7] & LMP_LSTO)
 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
 
-	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+	if (lmp_ssp_capable(hdev)) {
 		events[6] |= 0x01;	/* IO Capability Request */
 		events[6] |= 0x02;	/* IO Capability Response */
 		events[6] |= 0x04;	/* User Confirmation Request */
@@ -541,7 +541,7 @@
 					 * Features Notification */
 	}
 
-	if (hdev->features[4] & LMP_LE)
+	if (lmp_le_capable(hdev))
 		events[7] |= 0x20;	/* LE Meta-Event */
 
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
@@ -623,11 +623,11 @@
 	struct hci_cp_write_def_link_policy cp;
 	u16 link_policy = 0;
 
-	if (hdev->features[0] & LMP_RSWITCH)
+	if (lmp_rswitch_capable(hdev))
 		link_policy |= HCI_LP_RSWITCH;
 	if (hdev->features[0] & LMP_HOLD)
 		link_policy |= HCI_LP_HOLD;
-	if (hdev->features[0] & LMP_SNIFF)
+	if (lmp_sniff_capable(hdev))
 		link_policy |= HCI_LP_SNIFF;
 	if (hdev->features[1] & LMP_PARK)
 		link_policy |= HCI_LP_PARK;
@@ -686,7 +686,7 @@
 		hdev->esco_type |= (ESCO_HV3);
 	}
 
-	if (hdev->features[3] & LMP_ESCO)
+	if (lmp_esco_capable(hdev))
 		hdev->esco_type |= (ESCO_EV3);
 
 	if (hdev->features[4] & LMP_EV4)
@@ -746,7 +746,7 @@
 		break;
 	}
 
-	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
+	if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
 		hci_set_le_support(hdev);
 
 done:
@@ -1365,6 +1365,9 @@
 		return false;
 
 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+	if (!e)
+		return false;
+
 	if (hci_resolve_name(hdev, e) == 0) {
 		e->name_state = NAME_PENDING;
 		return true;
@@ -1393,12 +1396,20 @@
 		return;
 
 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
-	if (e) {
+	/* If the device was not found in the list of discovered devices whose
+	 * names are pending resolution, there is no need to continue resolving
+	 * the next name, as that will be done upon receiving another Remote
+	 * Name Request Complete event. */
+	if (!e)
+		return;
+
+	list_del(&e->list);
+	if (name) {
 		e->name_state = NAME_KNOWN;
-		list_del(&e->list);
-		if (name)
-			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
-					 e->data.rssi, name, name_len);
+		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
+				 e->data.rssi, name, name_len);
+	} else {
+		e->name_state = NAME_NOT_KNOWN;
 	}
 
 	if (hci_resolve_next_name(hdev))
@@ -1614,43 +1625,30 @@
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 {
-	struct hci_cp_le_create_conn *cp;
 	struct hci_conn *conn;
 
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
-	if (!cp)
-		return;
-
-	hci_dev_lock(hdev);
-
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
-
-	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
-	       conn);
-
 	if (status) {
-		if (conn && conn->state == BT_CONNECT) {
-			conn->state = BT_CLOSED;
-			mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
-					    conn->dst_type, status);
-			hci_proto_connect_cfm(conn, status);
-			hci_conn_del(conn);
-		}
-	} else {
-		if (!conn) {
-			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-			if (conn) {
-				conn->dst_type = cp->peer_addr_type;
-				conn->out = true;
-			} else {
-				BT_ERR("No memory for new connection");
-			}
-		}
-	}
+		hci_dev_lock(hdev);
 
-	hci_dev_unlock(hdev);
+		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		if (!conn) {
+			hci_dev_unlock(hdev);
+			return;
+		}
+
+		BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
+		       conn);
+
+		conn->state = BT_CLOSED;
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, status);
+		hci_proto_connect_cfm(conn, status);
+		hci_conn_del(conn);
+
+		hci_dev_unlock(hdev);
+	}
 }
 
 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
@@ -1762,7 +1760,12 @@
 		if (conn->type == ACL_LINK) {
 			conn->state = BT_CONFIG;
 			hci_conn_hold(conn);
-			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
+			    !hci_find_link_key(hdev, &ev->bdaddr))
+				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+			else
+				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 		} else
 			conn->state = BT_CONNECTED;
 
@@ -3252,12 +3255,8 @@
 
 	BT_DBG("%s", hdev->name);
 
-	hci_dev_lock(hdev);
-
 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
-
-	hci_dev_unlock(hdev);
 }
 
 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
@@ -3350,11 +3349,23 @@
 
 	hci_dev_lock(hdev);
 
-	if (ev->status) {
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-		if (!conn)
+	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	if (!conn) {
+		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		if (!conn) {
+			BT_ERR("No memory for new connection");
 			goto unlock;
+		}
 
+		conn->dst_type = ev->bdaddr_type;
+
+		if (ev->role == LE_CONN_ROLE_MASTER) {
+			conn->out = true;
+			conn->link_mode |= HCI_LM_MASTER;
+		}
+	}
+
+	if (ev->status) {
 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
 				    conn->dst_type, ev->status);
 		hci_proto_connect_cfm(conn, ev->status);
@@ -3363,18 +3374,6 @@
 		goto unlock;
 	}
 
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
-	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
-		if (!conn) {
-			BT_ERR("No memory for new connection");
-			hci_dev_unlock(hdev);
-			return;
-		}
-
-		conn->dst_type = ev->bdaddr_type;
-	}
-
 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
 				      conn->dst_type, 0, NULL, 0, NULL);
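
The hci_event.c hunks above replace raw feature-byte tests such as
hdev->features[6] & LMP_SIMPLE_PAIR with named helpers like
lmp_ssp_capable(hdev), so call sites state the capability instead of
repeating byte/bit offsets. A userspace sketch of that wrapper style
(demo_dev and the bit layout are made up):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_FEAT_SSP   0x01            /* byte 6, bit 0 -- made-up layout */
    #define DEMO_FEAT_LE    0x40            /* byte 4, bit 6 -- made-up layout */

    struct demo_dev {
            uint8_t features[8];
    };

    /* call sites say what they mean instead of repeating byte/bit offsets */
    static inline bool demo_ssp_capable(const struct demo_dev *d)
    {
            return d->features[6] & DEMO_FEAT_SSP;
    }

    static inline bool demo_le_capable(const struct demo_dev *d)
    {
            return d->features[4] & DEMO_FEAT_LE;
    }

    int main(void)
    {
            struct demo_dev dev = { .features = { [4] = DEMO_FEAT_LE } };

            printf("ssp=%d le=%d\n", demo_ssp_capable(&dev), demo_le_capable(&dev));
            return 0;
    }
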
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index a7f04de..bb64331 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -694,6 +694,7 @@
 	*addr_len = sizeof(*haddr);
 	haddr->hci_family = AF_BLUETOOTH;
 	haddr->hci_dev    = hdev->id;
+	haddr->hci_channel = 0;
 
 	release_sock(sk);
 	return 0;
@@ -1009,6 +1010,7 @@
 		{
 			struct hci_filter *f = &hci_pi(sk)->filter;
 
+			memset(&uf, 0, sizeof(uf));
 			uf.type_mask = f->type_mask;
 			uf.opcode    = f->opcode;
 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
@@ -1100,21 +1102,30 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("HCI socket registration failed");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create HCI proc file");
+		bt_sock_unregister(BTPROTO_HCI);
+		goto error;
+	}
 
 	BT_INFO("HCI socket layer initialized");
 
 	return 0;
 
 error:
-	BT_ERR("HCI socket registration failed");
 	proto_unregister(&hci_sk_proto);
 	return err;
 }
 
 void hci_sock_cleanup(void)
 {
+	bt_procfs_cleanup(&init_net, "hci");
 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
 		BT_ERR("HCI socket unregistration failed");
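
The memset() added in the HCI_FILTER getsockopt path above, like the
sockaddr zeroing in the L2CAP and RFCOMM getname paths further down, makes
sure every byte of a structure copied back to userspace is initialized,
including padding and any fields that are not explicitly set. A userspace
analogue of the pattern (demo_filter and its field layout are illustrative
only):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct demo_filter {
            uint32_t type_mask;
            uint16_t opcode;            /* 2 bytes of padding follow on most ABIs */
            uint32_t event_mask[2];
    };

    /* zero the whole struct first so padding and any skipped fields never
     * carry stale stack bytes into whatever buffer we hand out */
    static void demo_fill(struct demo_filter *out)
    {
            memset(out, 0, sizeof(*out));
            out->type_mask = 0x5;
            out->opcode = 0x0c1a;
            /* event_mask intentionally left at zero */
    }

    int main(void)
    {
            struct demo_filter f;
            const unsigned char *p = (const unsigned char *)&f;
            size_t i;

            demo_fill(&f);
            for (i = 0; i < sizeof(f); i++)
                    printf("%02x", p[i]);
            putchar('\n');
            return 0;
    }
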
 
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 18b3f68..eca3889 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -25,6 +25,10 @@
 
 #include "hidp.h"
 
+static struct bt_sock_list hidp_sk_list = {
+	.lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
 static int hidp_sock_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -34,6 +38,8 @@
 	if (!sk)
 		return 0;
 
+	bt_sock_unlink(&hidp_sk_list, sk);
+
 	sock_orphan(sk);
 	sock_put(sk);
 
@@ -253,6 +259,8 @@
 	sk->sk_protocol = protocol;
 	sk->sk_state	= BT_OPEN;
 
+	bt_sock_link(&hidp_sk_list, sk);
+
 	return 0;
 }
 
@@ -271,8 +279,19 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("Can't register HIDP socket");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create HIDP proc file");
+		bt_sock_unregister(BTPROTO_HIDP);
+		goto error;
+	}
+
+	BT_INFO("HIDP socket layer initialized");
 
 	return 0;
 
@@ -284,6 +303,7 @@
 
 void __exit hidp_cleanup_sockets(void)
 {
+	bt_procfs_cleanup(&init_net, "hidp");
 	if (bt_sock_unregister(BTPROTO_HIDP) < 0)
 		BT_ERR("Can't unregister HIDP socket");
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a8964db..f0a3ab1 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -416,13 +416,30 @@
 	return chan;
 }
 
-void l2cap_chan_destroy(struct l2cap_chan *chan)
+static void l2cap_chan_destroy(struct l2cap_chan *chan)
 {
+	BT_DBG("chan %p", chan);
+
 	write_lock(&chan_list_lock);
 	list_del(&chan->global_l);
 	write_unlock(&chan_list_lock);
 
-	l2cap_chan_put(chan);
+	kfree(chan);
+}
+
+void l2cap_chan_hold(struct l2cap_chan *c)
+{
+	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
+	atomic_inc(&c->refcnt);
+}
+
+void l2cap_chan_put(struct l2cap_chan *c)
+{
+	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
+	if (atomic_dec_and_test(&c->refcnt))
+		l2cap_chan_destroy(c);
 }
 
 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
@@ -1181,6 +1198,7 @@
 	sk = chan->sk;
 
 	hci_conn_hold(conn->hcon);
+	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
 
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
@@ -5329,7 +5347,7 @@
 	return exact ? lm1 : lm2;
 }
 
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
 	struct l2cap_conn *conn;
 
@@ -5342,7 +5360,6 @@
 	} else
 		l2cap_conn_del(hcon, bt_to_errno(status));
 
-	return 0;
 }
 
 int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5356,12 +5373,11 @@
 	return conn->disc_reason;
 }
 
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
 	BT_DBG("hcon %p reason %d", hcon, reason);
 
 	l2cap_conn_del(hcon, bt_to_errno(reason));
-	return 0;
 }
 
 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
@@ -5404,6 +5420,11 @@
 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
 		       state_to_string(chan->state));
 
+		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+			l2cap_chan_unlock(chan);
+			continue;
+		}
+
 		if (chan->scid == L2CAP_CID_LE_DATA) {
 			if (!status && encrypt) {
 				chan->sec_level = hcon->sec_level;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a4bb27e..3a6ce73 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -34,6 +34,10 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
+static struct bt_sock_list l2cap_sk_list = {
+	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
@@ -245,6 +249,7 @@
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
+	memset(la, 0, sizeof(struct sockaddr_l2));
 	addr->sa_family = AF_BLUETOOTH;
 	*len = sizeof(struct sockaddr_l2);
 
@@ -823,7 +828,7 @@
 
 	/* Kill poor orphan */
 
-	l2cap_chan_destroy(l2cap_pi(sk)->chan);
+	l2cap_chan_put(l2cap_pi(sk)->chan);
 	sock_set_flag(sk, SOCK_DEAD);
 	sock_put(sk);
 }
@@ -886,6 +891,8 @@
 	if (!sk)
 		return 0;
 
+	bt_sock_unlink(&l2cap_sk_list, sk);
+
 	err = l2cap_sock_shutdown(sock, 2);
 
 	sock_orphan(sk);
@@ -1174,7 +1181,7 @@
 
 	chan = l2cap_chan_create();
 	if (!chan) {
-		l2cap_sock_kill(sk);
+		sk_free(sk);
 		return NULL;
 	}
 
@@ -1210,6 +1217,7 @@
 		return -ENOMEM;
 
 	l2cap_sock_init(sk, NULL);
+	bt_sock_link(&l2cap_sk_list, sk);
 	return 0;
 }
 
@@ -1248,21 +1256,30 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("L2CAP socket registration failed");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create L2CAP proc file");
+		bt_sock_unregister(BTPROTO_L2CAP);
+		goto error;
+	}
 
 	BT_INFO("L2CAP socket layer initialized");
 
 	return 0;
 
 error:
-	BT_ERR("L2CAP socket registration failed");
 	proto_unregister(&l2cap_proto);
 	return err;
 }
 
 void l2cap_cleanup_sockets(void)
 {
+	bt_procfs_cleanup(&init_net, "l2cap");
 	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
 		BT_ERR("L2CAP socket unregistration failed");
 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ad6613d..a3329cb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -193,6 +193,11 @@
 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
 };
 
+bool mgmt_valid_hdev(struct hci_dev *hdev)
+{
+	return hdev->dev_type == HCI_BREDR;
+}
+
 static u8 mgmt_status(u8 hci_status)
 {
 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -317,7 +322,6 @@
 			   u16 data_len)
 {
 	struct mgmt_rp_read_index_list *rp;
-	struct list_head *p;
 	struct hci_dev *d;
 	size_t rp_len;
 	u16 count;
@@ -328,7 +332,10 @@
 	read_lock(&hci_dev_list_lock);
 
 	count = 0;
-	list_for_each(p, &hci_dev_list) {
+	list_for_each_entry(d, &hci_dev_list, list) {
+		if (!mgmt_valid_hdev(d))
+			continue;
+
 		count++;
 	}
 
@@ -346,6 +353,9 @@
 		if (test_bit(HCI_SETUP, &d->dev_flags))
 			continue;
 
+		if (!mgmt_valid_hdev(d))
+			continue;
+
 		rp->index[i++] = cpu_to_le16(d->id);
 		BT_DBG("Added hci%u", d->id);
 	}
@@ -370,10 +380,10 @@
 	settings |= MGMT_SETTING_DISCOVERABLE;
 	settings |= MGMT_SETTING_PAIRABLE;
 
-	if (hdev->features[6] & LMP_SIMPLE_PAIR)
+	if (lmp_ssp_capable(hdev))
 		settings |= MGMT_SETTING_SSP;
 
-	if (!(hdev->features[4] & LMP_NO_BREDR)) {
+	if (lmp_bredr_capable(hdev)) {
 		settings |= MGMT_SETTING_BREDR;
 		settings |= MGMT_SETTING_LINK_SECURITY;
 	}
@@ -381,7 +391,7 @@
 	if (enable_hs)
 		settings |= MGMT_SETTING_HS;
 
-	if (hdev->features[4] & LMP_LE)
+	if (lmp_le_capable(hdev))
 		settings |= MGMT_SETTING_LE;
 
 	return settings;
@@ -403,7 +413,7 @@
 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
 		settings |= MGMT_SETTING_PAIRABLE;
 
-	if (!(hdev->features[4] & LMP_NO_BREDR))
+	if (lmp_bredr_capable(hdev))
 		settings |= MGMT_SETTING_BREDR;
 
 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -1111,7 +1121,7 @@
 
 	hci_dev_lock(hdev);
 
-	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+	if (!lmp_ssp_capable(hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
 				 MGMT_STATUS_NOT_SUPPORTED);
 		goto failed;
@@ -1195,7 +1205,7 @@
 
 	hci_dev_lock(hdev);
 
-	if (!(hdev->features[4] & LMP_LE)) {
+	if (!lmp_le_capable(hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
 				 MGMT_STATUS_NOT_SUPPORTED);
 		goto unlock;
@@ -2191,7 +2201,7 @@
 		goto unlock;
 	}
 
-	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+	if (!lmp_ssp_capable(hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
 				 MGMT_STATUS_NOT_SUPPORTED);
 		goto unlock;
@@ -2820,6 +2830,9 @@
 
 int mgmt_index_added(struct hci_dev *hdev)
 {
+	if (!mgmt_valid_hdev(hdev))
+		return -ENOTSUPP;
+
 	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
@@ -2827,6 +2840,9 @@
 {
 	u8 status = MGMT_STATUS_INVALID_INDEX;
 
+	if (!mgmt_valid_hdev(hdev))
+		return -ENOTSUPP;
+
 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
 	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7e1e596..b3226f3 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -528,6 +528,7 @@
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
+	memset(sa, 0, sizeof(*sa));
 	sa->rc_family  = AF_BLUETOOTH;
 	sa->rc_channel = rfcomm_pi(sk)->channel;
 	if (peer)
@@ -822,6 +823,7 @@
 		}
 
 		sec.level = rfcomm_pi(sk)->sec_level;
+		sec.key_size = 0;
 
 		len = min_t(unsigned int, len, sizeof(sec));
 		if (copy_to_user(optval, (char *) &sec, len))
@@ -1033,8 +1035,17 @@
 		return err;
 
 	err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
-	if (err < 0)
+	if (err < 0) {
+		BT_ERR("RFCOMM socket layer registration failed");
 		goto error;
+	}
+
+	err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create RFCOMM proc file");
+		bt_sock_unregister(BTPROTO_RFCOMM);
+		goto error;
+	}
 
 	if (bt_debugfs) {
 		rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1048,13 +1059,14 @@
 	return 0;
 
 error:
-	BT_ERR("RFCOMM socket layer registration failed");
 	proto_unregister(&rfcomm_proto);
 	return err;
 }
 
 void __exit rfcomm_cleanup_sockets(void)
 {
+	bt_procfs_cleanup(&init_net, "rfcomm");
+
 	debugfs_remove(rfcomm_sock_debugfs);
 
 	if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index cb96077..56f1823 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -456,7 +456,7 @@
 
 	size = sizeof(*dl) + dev_num * sizeof(*di);
 
-	dl = kmalloc(size, GFP_KERNEL);
+	dl = kzalloc(size, GFP_KERNEL);
 	if (!dl)
 		return -ENOMEM;
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 40bbe25..dc42b91 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -131,6 +131,15 @@
 		sco_sock_clear_timer(sk);
 		sco_chan_del(sk, err);
 		bh_unlock_sock(sk);
+
+		sco_conn_lock(conn);
+		conn->sk = NULL;
+		sco_pi(sk)->conn = NULL;
+		sco_conn_unlock(conn);
+
+		if (conn->hcon)
+			hci_conn_put(conn->hcon);
+
 		sco_sock_kill(sk);
 	}
 
@@ -821,16 +830,6 @@
 
 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
 
-	if (conn) {
-		sco_conn_lock(conn);
-		conn->sk = NULL;
-		sco_pi(sk)->conn = NULL;
-		sco_conn_unlock(conn);
-
-		if (conn->hcon)
-			hci_conn_put(conn->hcon);
-	}
-
 	sk->sk_state = BT_CLOSED;
 	sk->sk_err   = err;
 	sk->sk_state_change(sk);
@@ -913,7 +912,7 @@
 	return lm;
 }
 
-int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
 	if (!status) {
@@ -924,16 +923,13 @@
 			sco_conn_ready(conn);
 	} else
 		sco_conn_del(hcon, bt_to_errno(status));
-
-	return 0;
 }
 
-int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
 	BT_DBG("hcon %p reason %d", hcon, reason);
 
 	sco_conn_del(hcon, bt_to_errno(reason));
-	return 0;
 }
 
 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
@@ -1026,6 +1022,13 @@
 		goto error;
 	}
 
+	err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
+	if (err < 0) {
+		BT_ERR("Failed to create SCO proc file");
+		bt_sock_unregister(BTPROTO_SCO);
+		goto error;
+	}
+
 	if (bt_debugfs) {
 		sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
 						  NULL, &sco_debugfs_fops);
@@ -1044,6 +1047,8 @@
 
 void __exit sco_exit(void)
 {
+	bt_procfs_cleanup(&init_net, "sco");
+
 	debugfs_remove(sco_debugfs);
 
 	if (bt_sock_unregister(BTPROTO_SCO) < 0)
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 16ef0dc..901a616 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -579,8 +579,11 @@
 
 	if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
 		smp = smp_chan_create(conn);
+	else
+		smp = conn->smp_chan;
 
-	smp = conn->smp_chan;
+	if (!smp)
+		return SMP_UNSPECIFIED;
 
 	smp->preq[0] = SMP_CMD_PAIRING_REQ;
 	memcpy(&smp->preq[1], req, sizeof(*req));
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 3334845..070e8a6 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -31,9 +31,11 @@
 	struct net_bridge_mdb_entry *mdst;
 	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
+	rcu_read_lock();
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
 		br_nf_pre_routing_finish_bridge_slow(skb);
+		rcu_read_unlock();
 		return NETDEV_TX_OK;
 	}
 #endif
@@ -48,7 +50,6 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	rcu_read_lock();
 	if (is_broadcast_ether_addr(dest))
 		br_flood_deliver(br, skb);
 	else if (is_multicast_ether_addr(dest)) {
@@ -206,24 +207,23 @@
 static void br_netpoll_cleanup(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
-	struct net_bridge_port *p, *n;
+	struct net_bridge_port *p;
 
-	list_for_each_entry_safe(p, n, &br->port_list, list) {
+	list_for_each_entry(p, &br->port_list, list)
 		br_netpoll_disable(p);
-	}
 }
 
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
+			    gfp_t gfp)
 {
 	struct net_bridge *br = netdev_priv(dev);
-	struct net_bridge_port *p, *n;
+	struct net_bridge_port *p;
 	int err = 0;
 
-	list_for_each_entry_safe(p, n, &br->port_list, list) {
+	list_for_each_entry(p, &br->port_list, list) {
 		if (!p->dev)
 			continue;
-
-		err = br_netpoll_enable(p);
+		err = br_netpoll_enable(p, gfp);
 		if (err)
 			goto fail;
 	}
@@ -236,17 +236,17 @@
 	goto out;
 }
 
-int br_netpoll_enable(struct net_bridge_port *p)
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 {
 	struct netpoll *np;
 	int err = 0;
 
-	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+	np = kzalloc(sizeof(*p->np), gfp);
 	err = -ENOMEM;
 	if (!np)
 		goto out;
 
-	err = __netpoll_setup(np, p->dev);
+	err = __netpoll_setup(np, p->dev, gfp);
 	if (err) {
 		kfree(np);
 		goto out;
@@ -267,11 +267,7 @@
 
 	p->np = NULL;
 
-	/* Wait for transmitting packets to finish before freeing. */
-	synchronize_rcu_bh();
-
-	__netpoll_cleanup(np);
-	kfree(np);
+	__netpoll_free_rcu(np);
 }
 
 #endif
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d21f323..9ce430b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -312,7 +312,7 @@
 
 			fe->is_local = f->is_local;
 			if (!f->is_static)
-				fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
+				fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
 			++fe;
 			++num;
 		}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e9466d4..02015a5 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -65,7 +65,7 @@
 {
 	skb->dev = to->dev;
 
-	if (unlikely(netpoll_tx_running(to->dev))) {
+	if (unlikely(netpoll_tx_running(to->br->dev))) {
 		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
 			kfree_skb(skb);
 		else {
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e1144e1..1c8fdc3 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -361,7 +361,7 @@
 	if (err)
 		goto err2;
 
-	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
 		goto err3;
 
 	err = netdev_set_master(dev, br->dev);
@@ -427,6 +427,10 @@
 	if (!p || p->br != br)
 		return -EINVAL;
 
+	/* Since more than one interface can be attached to a bridge,
+	 * there may still be an alternate path for netconsole to use;
+	 * therefore there is no reason for a NETDEV_RELEASE event.
+	 */
 	del_nbp(p);
 
 	spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index a768b24..f507d2a 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -316,7 +316,7 @@
 		netpoll_send_skb(np, skb);
 }
 
-extern int br_netpoll_enable(struct net_bridge_port *p);
+extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
 extern void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
@@ -329,7 +329,7 @@
 {
 }
 
-static inline int br_netpoll_enable(struct net_bridge_port *p)
+static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 {
 	return 0;
 }
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index a6747e6..c3530a8 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -170,5 +170,5 @@
 unsigned long br_timer_value(const struct timer_list *timer)
 {
 	return timer_pending(timer)
-		? jiffies_to_clock_t(timer->expires - jiffies) : 0;
+		? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
 }
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 42e6bd0..3c2e9dc 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -100,9 +100,7 @@
 static int __net_init frame_filter_net_init(struct net *net)
 {
 	net->xt.frame_filter = ebt_register_table(net, &frame_filter);
-	if (IS_ERR(net->xt.frame_filter))
-		return PTR_ERR(net->xt.frame_filter);
-	return 0;
+	return PTR_RET(net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 6dc2f87..10871bc 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -100,9 +100,7 @@
 static int __net_init frame_nat_net_init(struct net *net)
 {
 	net->xt.frame_nat = ebt_register_table(net, &frame_nat);
-	if (IS_ERR(net->xt.frame_nat))
-		return PTR_ERR(net->xt.frame_nat);
-	return 0;
+	return PTR_RET(net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
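
PTR_RET() used in the two ebtables hunks above collapses the open-coded
"IS_ERR(p) ? PTR_ERR(p) : 0" into one call. The scheme works because errno
values are encoded into the pointer itself, in the top few kilobytes of the
address space. A userspace sketch of that encoding (the demo_* helpers
mirror ERR_PTR/IS_ERR/PTR_ERR and assume pointers fit in a long, as on the
kernel's targets):

    #include <stdio.h>

    #define DEMO_MAX_ERRNO	4095UL

    static inline void *demo_err_ptr(long err)       /* ERR_PTR()  */
    {
            return (void *)err;
    }

    static inline long demo_ptr_err(const void *p)   /* PTR_ERR()  */
    {
            return (long)p;
    }

    static inline int demo_is_err(const void *p)     /* IS_ERR()   */
    {
            return (unsigned long)p >= (unsigned long)-DEMO_MAX_ERRNO;
    }

    static inline long demo_ptr_ret(const void *p)   /* PTR_RET()  */
    {
            return demo_is_err(p) ? demo_ptr_err(p) : 0;
    }

    static void *demo_register(int fail)
    {
            static int table;

            return fail ? demo_err_ptr(-12 /* -ENOMEM */) : &table;
    }

    int main(void)
    {
            printf("ok   -> %ld\n", demo_ptr_ret(demo_register(0)));   /* 0   */
            printf("fail -> %ld\n", demo_ptr_ret(demo_register(1)));   /* -12 */
            return 0;
    }
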
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 69771c0..e597733 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -94,6 +94,10 @@
 
 	/* check the version of IP */
 	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
 
 	switch (*ip_version >> 4) {
 	case 4:
diff --git a/net/core/dev.c b/net/core/dev.c
index 026bb4a..3401e2d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+	char *new_ifalias;
+
 	ASSERT_RTNL();
 
 	if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@
 		return 0;
 	}
 
-	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-	if (!dev->ifalias)
+	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+	if (!new_ifalias)
 		return -ENOMEM;
+	dev->ifalias = new_ifalias;
 
 	strlcpy(dev->ifalias, alias, len+1);
 	return len;
@@ -1106,11 +1109,23 @@
 }
 EXPORT_SYMBOL(netdev_state_change);
 
-int netdev_bonding_change(struct net_device *dev, unsigned long event)
+/**
+ * 	netdev_notify_peers - notify network peers about existence of @dev
+ * 	@dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netdev_notify_peers(struct net_device *dev)
 {
-	return call_netdevice_notifiers(event, dev);
+	rtnl_lock();
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	rtnl_unlock();
 }
-EXPORT_SYMBOL(netdev_bonding_change);
+EXPORT_SYMBOL(netdev_notify_peers);
 
 /**
  *	dev_load 	- load a network module
@@ -1391,7 +1406,6 @@
 				nb->notifier_call(nb, NETDEV_DOWN, dev);
 			}
 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
 		}
 	}
 
@@ -1433,7 +1447,6 @@
 				nb->notifier_call(nb, NETDEV_DOWN, dev);
 			}
 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
 		}
 	}
 unlock:
@@ -1639,6 +1652,19 @@
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
+static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+{
+	if (ptype->af_packet_priv == NULL)
+		return false;
+
+	if (ptype->id_match)
+		return ptype->id_match(ptype, skb->sk);
+	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
+		return true;
+
+	return false;
+}
+
 /*
  *	Support routine. Sends outgoing frames to any network
  *	taps currently in use.
@@ -1656,8 +1682,7 @@
 		 * they originated from - MvS (miquels@drinkel.ow.org)
 		 */
 		if ((ptype->dev == dev || !ptype->dev) &&
-		    (ptype->af_packet_priv == NULL ||
-		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
+		    (!skb_loop_sk(ptype, skb))) {
 			if (pt_prev) {
 				deliver_skb(skb2, pt_prev, skb->dev);
 				pt_prev = ptype;
@@ -2134,6 +2159,9 @@
 	__be16 protocol = skb->protocol;
 	netdev_features_t features = skb->dev->features;
 
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
@@ -5219,12 +5247,12 @@
  */
 static int dev_new_index(struct net *net)
 {
-	static int ifindex;
+	int ifindex = net->ifindex;
 	for (;;) {
 		if (++ifindex <= 0)
 			ifindex = 1;
 		if (!__dev_get_by_index(net, ifindex))
-			return ifindex;
+			return net->ifindex = ifindex;
 	}
 }
 
@@ -5302,10 +5330,6 @@
 		netdev_unregister_kobject(dev);
 	}
 
-	/* Process any work delayed until the end of the batch */
-	dev = list_first_entry(head, struct net_device, unreg_list);
-	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
-
 	synchronize_net();
 
 	list_for_each_entry(dev, head, unreg_list)
@@ -5577,7 +5601,12 @@
 		}
 	}
 
-	dev->ifindex = dev_new_index(net);
+	ret = -EBUSY;
+	if (!dev->ifindex)
+		dev->ifindex = dev_new_index(net);
+	else if (__dev_get_by_index(net, dev->ifindex))
+		goto err_uninit;
+
 	if (dev->iflink == -1)
 		dev->iflink = dev->ifindex;
 
@@ -5620,6 +5649,8 @@
 
 	set_bit(__LINK_STATE_PRESENT, &dev->state);
 
+	linkwatch_init_dev(dev);
+
 	dev_init_scheduler(dev);
 	dev_hold(dev);
 	list_netdevice(dev);
@@ -5727,6 +5758,7 @@
 
 /**
  * netdev_wait_allrefs - wait until all references are gone.
+ * @dev: target net_device
  *
  * This is called when unregistering network devices.
  *
@@ -5752,9 +5784,12 @@
 
 			/* Rebroadcast unregister notification */
 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
-			 * should have already handle it the first time */
 
+			__rtnl_unlock();
+			rcu_barrier();
+			rtnl_lock();
+
+			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
 				     &dev->state)) {
 				/* We must not have linkwatch events
@@ -5816,9 +5851,8 @@
 
 	__rtnl_unlock();
 
-	/* Wait for rcu callbacks to finish before attempting to drain
-	 * the device list.  This usually avoids a 250ms wait.
-	 */
+
+	/* Wait for rcu callbacks to finish before next phase */
 	if (!list_empty(&list))
 		rcu_barrier();
 
@@ -5827,6 +5861,10 @@
 			= list_first_entry(&list, struct net_device, todo_list);
 		list_del(&dev->todo_list);
 
+		rtnl_lock();
+		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+		__rtnl_unlock();
+
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
 			pr_err("network todo '%s' but state %d\n",
 			       dev->name, dev->reg_state);
@@ -5987,6 +6025,7 @@
 	dev_net_set(dev, &init_net);
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
@@ -6220,7 +6259,6 @@
 	   the device is just moving and can keep their slaves up.
 	*/
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
 	/*
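
The dev_set_alias() fix above assigns krealloc()'s result to a temporary
first, so a failed reallocation no longer overwrites, and thereby leaks, the
existing dev->ifalias buffer. The same rule applies to plain realloc(); a
small userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* grow *bufp to hold 'alias'; on failure the old buffer is kept intact */
    static int demo_set_alias(char **bufp, const char *alias)
    {
            size_t len = strlen(alias);
            char *new_buf;

            new_buf = realloc(*bufp, len + 1);  /* never assign straight to *bufp */
            if (!new_buf)
                    return -1;                  /* *bufp still valid, nothing leaked */

            *bufp = new_buf;
            memcpy(*bufp, alias, len + 1);
            return 0;
    }

    int main(void)
    {
            char *alias = NULL;

            if (demo_set_alias(&alias, "uplink0") == 0)
                    puts(alias);
            free(alias);
            return 0;
    }
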
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d..f6593d2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@
 }
 EXPORT_SYMBOL(dst_discard);
 
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+	/* This initializer is needed to force the linker to place this variable
+	 * into the const section. Otherwise it might end up in the bss section.
+	 * We really want to avoid false sharing on this variable, and to catch
+	 * any writes to it.
+	 */
+	[RTAX_MAX] = 0xdeadbeef,
+};
+
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 		int initial_ref, int initial_obsolete, unsigned short flags)
@@ -366,7 +374,7 @@
 	struct dst_entry *dst, *last = NULL;
 
 	switch (event) {
-	case NETDEV_UNREGISTER:
+	case NETDEV_UNREGISTER_FINAL:
 	case NETDEV_DOWN:
 		mutex_lock(&dst_gc_mutex);
 		for (dst = dst_busy_list; dst; dst = dst->next) {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6..a019222 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -76,6 +76,14 @@
 }
 
 
+void linkwatch_init_dev(struct net_device *dev)
+{
+	/* Handle pre-registration link state changes */
+	if (!netif_carrier_ok(dev) || netif_dormant(dev))
+		rfc2863_policy(dev);
+}
+
+
 static bool linkwatch_urgent_event(struct net_device *dev)
 {
 	if (!netif_running(dev))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e4..346b1eb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
@@ -54,7 +55,7 @@
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -167,15 +168,24 @@
 	struct napi_struct *napi;
 	int budget = 16;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+		local_irq_enable();
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(dev->npinfo, napi, budget);
+			rcu_read_lock_bh();
+			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+					       napi, budget);
+			rcu_read_unlock_bh();
 			spin_unlock(&napi->poll_lock);
 
-			if (!budget)
+			if (!budget) {
+				local_irq_disable();
 				break;
+			}
 		}
+		local_irq_disable();
 	}
 }
 
@@ -185,13 +195,14 @@
 		struct sk_buff *skb;
 
 		while ((skb = skb_dequeue(&npi->arp_tx)))
-			arp_reply(skb);
+			netpoll_arp_reply(skb, npi);
 	}
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
+	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
 	if (!dev || !netif_running(dev))
 		return;
@@ -206,17 +217,18 @@
 	poll_napi(dev);
 
 	if (dev->flags & IFF_SLAVE) {
-		if (dev->npinfo) {
+		if (ni) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
-			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+			while ((skb = skb_dequeue(&ni->arp_tx))) {
 				skb->dev = bond_dev;
-				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+				skb_queue_tail(&bond_ni->arp_tx, skb);
 			}
 		}
 	}
 
-	service_arp_queue(dev->npinfo);
+	service_arp_queue(ni);
 
 	zap_completion_queue();
 }
@@ -302,6 +314,7 @@
 	return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev)
 {
@@ -309,8 +322,11 @@
 	unsigned long tries;
 	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
-	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct netpoll_info *npinfo;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
+	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		__kfree_skb(skb);
 		return;
@@ -319,16 +335,22 @@
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
-		unsigned long flags;
 
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
 				if (!netif_xmit_stopped(txq)) {
+					if (vlan_tx_tag_present(skb) &&
+					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+						if (unlikely(!skb))
+							break;
+						skb->vlan_tci = 0;
+					}
+
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
@@ -347,10 +369,9 @@
 		}
 
 		WARN_ONCE(!irqs_disabled(),
-			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
 			dev->name, ops->ndo_start_xmit);
 
-		local_irq_restore(flags);
 	}
 
 	if (status != NETDEV_TX_OK) {
@@ -423,9 +444,8 @@
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +563,12 @@
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 }
 
-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int proto, len, ulen;
 	int hits = 0;
 	const struct iphdr *iph;
 	struct udphdr *uh;
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct netpoll *np, *tmp;
 
 	if (list_empty(&npinfo->rx_np))
@@ -565,6 +584,12 @@
 		return 1;
 	}
 
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+		skb = vlan_untag(skb);
+		if (unlikely(!skb))
+			goto out;
+	}
+
 	proto = ntohs(eth_hdr(skb)->h_proto);
 	if (proto != ETH_P_IP)
 		goto out;
@@ -715,7 +740,7 @@
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -734,7 +759,7 @@
 	}
 
 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		npinfo = kmalloc(sizeof(*npinfo), gfp);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -752,7 +777,7 @@
 
 		ops = np->dev->netdev_ops;
 		if (ops->ndo_netpoll_setup) {
-			err = ops->ndo_netpoll_setup(ndev, npinfo);
+			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
 			if (err)
 				goto free_npinfo;
 		}
@@ -857,7 +882,7 @@
 	refill_skbs();
 
 	rtnl_lock();
-	err = __netpoll_setup(np, ndev);
+	err = __netpoll_setup(np, ndev, GFP_KERNEL);
 	rtnl_unlock();
 
 	if (err)
@@ -878,6 +903,24 @@
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,21 +946,25 @@
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-
-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
-
-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
-
-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	}
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+
+	__netpoll_cleanup(np);
+	kfree(np);
+}
+
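+/* Free a netpoll structure (and run __netpoll_cleanup) only after an
+ * RCU (bh) grace period, so concurrent readers can finish safely.
+ */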
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+}
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+
 void netpoll_cleanup(struct netpoll *np)
 {
 	if (!np->dev)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ed0c043..c75e3f9 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -101,12 +101,10 @@
 	u32 max_len;
 	struct netprio_map *map;
 
-	rtnl_lock();
 	max_len = atomic_read(&max_prioidx) + 1;
 	map = rtnl_dereference(dev->priomap);
 	if (!map || map->priomap_len < max_len)
 		ret = extend_netdev_table(dev, max_len);
-	rtnl_unlock();
 
 	return ret;
 }
@@ -256,17 +254,17 @@
 	if (!dev)
 		goto out_free_devname;
 
+	rtnl_lock();
 	ret = write_update_netdev_table(dev);
 	if (ret < 0)
 		goto out_put_dev;
 
-	rcu_read_lock();
-	map = rcu_dereference(dev->priomap);
+	map = rtnl_dereference(dev->priomap);
 	if (map)
 		map->priomap[prioidx] = priority;
-	rcu_read_unlock();
 
 out_put_dev:
+	rtnl_unlock();
 	dev_put(dev);
 
 out_free_devname:
@@ -277,12 +275,6 @@
 void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
-	char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
-
-	if (!tmp) {
-		pr_warn("Unable to attach cgrp due to alloc failure!\n");
-		return;
-	}
 
 	cgroup_taskset_for_each(p, cgrp, tset) {
 		unsigned int fd;
@@ -296,32 +288,24 @@
 			continue;
 		}
 
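+		/* file_lock keeps the fdtable stable while we walk it */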
-		rcu_read_lock();
+		spin_lock(&files->file_lock);
 		fdt = files_fdtable(files);
 		for (fd = 0; fd < fdt->max_fds; fd++) {
-			char *path;
 			struct file *file;
 			struct socket *sock;
-			unsigned long s;
-			int rv, err = 0;
+			int err;
 
 			file = fcheck_files(files, fd);
 			if (!file)
 				continue;
 
-			path = d_path(&file->f_path, tmp, PAGE_SIZE);
-			rv = sscanf(path, "socket:[%lu]", &s);
-			if (rv <= 0)
-				continue;
-
 			sock = sock_from_file(file, &err);
-			if (!err)
+			if (sock)
 				sock_update_netprioidx(sock->sk, p);
 		}
-		rcu_read_unlock();
+		spin_unlock(&files->file_lock);
 		task_unlock(p);
 	}
-	kfree(tmp);
 }
 
 static struct cftype ss_files[] = {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2c5a0a0..c64efcf 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -618,7 +618,7 @@
 		       long expires, u32 error)
 {
 	struct rta_cacheinfo ci = {
-		.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
 		.rta_used = dst->__use,
 		.rta_clntref = atomic_read(&(dst->__refcnt)),
 		.rta_error = error,
@@ -1812,8 +1812,6 @@
 			return -ENODEV;
 		}
 
-		if (ifm->ifi_index)
-			return -EOPNOTSUPP;
 		if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
 			return -EOPNOTSUPP;
 
@@ -1839,10 +1837,14 @@
 			return PTR_ERR(dest_net);
 
 		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
-
-		if (IS_ERR(dev))
+		if (IS_ERR(dev)) {
 			err = PTR_ERR(dev);
-		else if (ops->newlink)
+			goto out;
+		}
+
+		dev->ifindex = ifm->ifi_index;
+
+		if (ops->newlink)
 			err = ops->newlink(net, dev, tb, data);
 		else
 			err = register_netdevice(dev);
@@ -2356,7 +2358,7 @@
 	case NETDEV_PRE_TYPE_CHANGE:
 	case NETDEV_GOING_DOWN:
 	case NETDEV_UNREGISTER:
-	case NETDEV_UNREGISTER_BATCH:
+	case NETDEV_UNREGISTER_FINAL:
 	case NETDEV_RELEASE:
 	case NETDEV_JOIN:
 		break;
diff --git a/net/core/scm.c b/net/core/scm.c
index 5472ae7..6ab491d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -280,6 +280,7 @@
 	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
 	     i++, cmfptr++)
 	{
+		struct socket *sock;
 		int new_fd;
 		err = security_file_receive(fp[i]);
 		if (err)
@@ -296,6 +297,9 @@
 		}
 		/* Bump the usage count and install the file. */
 		get_file(fp[i]);
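+		/* If the passed descriptor is a socket, refresh its netprio
+		 * index for the receiving task.
+		 */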
+		sock = sock_from_file(fp[i], &err);
+		if (sock)
+			sock_update_netprioidx(sock->sk, current);
 		fd_install(new_fd, fp[i]);
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 5c6a435..d765156 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1230,7 +1230,7 @@
 	rcu_read_lock();  /* doing current task, which cannot vanish. */
 	classid = task_cls_classid(current);
 	rcu_read_unlock();
-	if (classid && classid != sk->sk_classid)
+	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
@@ -1458,6 +1458,7 @@
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 75c3582..fb85d37 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -246,7 +246,7 @@
 					u32 __user *optval, int __user *optlen)
 {
 	int rc = -ENOPROTOOPT;
-	if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+	if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
 		rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
 						 optval, optlen);
 	return rc;
@@ -257,7 +257,7 @@
 					u32 __user *optval, int __user *optlen)
 {
 	int rc = -ENOPROTOOPT;
-	if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+	if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
 		rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
 						 optval, optlen);
 	return rc;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d65e987..119c043 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -535,6 +535,7 @@
 	case DCCP_SOCKOPT_CCID_TX_INFO:
 		if (len < sizeof(tfrc))
 			return -EINVAL;
+		memset(&tfrc, 0, sizeof(tfrc));
 		tfrc.tfrctx_x	   = hc->tx_x;
 		tfrc.tfrctx_x_recv = hc->tx_x_recv;
 		tfrc.tfrctx_x_calc = hc->tx_x_calc;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 85a3604..c855e8d 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -961,7 +961,7 @@
 		.saddr = oldflp->saddr,
 		.flowidn_scope = RT_SCOPE_UNIVERSE,
 		.flowidn_mark = oldflp->flowidn_mark,
-		.flowidn_iif = init_net.loopback_dev->ifindex,
+		.flowidn_iif = LOOPBACK_IFINDEX,
 		.flowidn_oif = oldflp->flowidn_oif,
 	};
 	struct dn_route *rt = NULL;
@@ -979,7 +979,7 @@
 		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
 		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
 		       le16_to_cpu(oldflp->saddr),
-		       oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+		       oldflp->flowidn_mark, LOOPBACK_IFINDEX,
 		       oldflp->flowidn_oif);
 
 	/* If we have an output interface, verify it's a DECnet device */
@@ -1042,7 +1042,7 @@
 			if (!fld.daddr)
 				goto out;
 		}
-		fld.flowidn_oif = init_net.loopback_dev->ifindex;
+		fld.flowidn_oif = LOOPBACK_IFINDEX;
 		res.type = RTN_LOCAL;
 		goto make_route;
 	}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fe4582c..6681ccf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1364,7 +1364,7 @@
 	if (*(u8 *)iph != 0x45)
 		goto out_unlock;
 
-	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
 		goto out_unlock;
 
 	id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1380,6 @@
 		iph2 = ip_hdr(p);
 
 		if ((iph->protocol ^ iph2->protocol) |
-		    (iph->tos ^ iph2->tos) |
 		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
 		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1389,7 @@
 		/* All fields must match except length and checksum. */
 		NAPI_GRO_CB(p)->flush |=
 			(iph->ttl ^ iph2->ttl) |
+			(iph->tos ^ iph2->tos) |
 			((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
 		NAPI_GRO_CB(p)->flush |= flush;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 44bf82e..adf273f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -94,25 +94,22 @@
 	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
- * value.  So if you change this define, make appropriate changes to
- * inet_addr_hash as well.
- */
-#define IN4_ADDR_HSIZE	256
+#define IN4_ADDR_HSIZE_SHIFT	8
+#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
+
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
-	u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+	u32 val = (__force u32) addr ^ net_hash_mix(net);
 
-	return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
-		(IN4_ADDR_HSIZE - 1));
+	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 }
 
 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
-	unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
 	spin_lock(&inet_addr_hash_lock);
 	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
-	unsigned int hash = inet_addr_hash(net, addr);
+	u32 hash = inet_addr_hash(net, addr);
 	struct net_device *result = NULL;
 	struct in_ifaddr *ifa;
 	struct hlist_node *node;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
-		struct net_device *dev = ifa->ifa_dev->dev;
-
-		if (!net_eq(dev_net(dev), net))
-			continue;
 		if (ifa->ifa_local == addr) {
+			struct net_device *dev = ifa->ifa_dev->dev;
+
+			if (!net_eq(dev_net(dev), net))
+				continue;
 			result = dev;
 			break;
 		}
@@ -182,10 +179,10 @@
 static void devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static inline void devinet_sysctl_register(struct in_device *idev)
+static void devinet_sysctl_register(struct in_device *idev)
 {
 }
-static inline void devinet_sysctl_unregister(struct in_device *idev)
+static void devinet_sysctl_unregister(struct in_device *idev)
 {
 }
 #endif
@@ -205,7 +202,7 @@
 	kfree(ifa);
 }
 
-static inline void inet_free_ifa(struct in_ifaddr *ifa)
+static void inet_free_ifa(struct in_ifaddr *ifa)
 {
 	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 }
@@ -659,7 +656,7 @@
  *	Determine a default network mask, based on the IP address.
  */
 
-static inline int inet_abc_len(__be32 addr)
+static int inet_abc_len(__be32 addr)
 {
 	int rc = -1;	/* Something else, probably a multicast. */
 
@@ -1124,7 +1121,7 @@
 	}
 }
 
-static inline bool inetdev_valid_mtu(unsigned int mtu)
+static bool inetdev_valid_mtu(unsigned int mtu)
 {
 	return mtu >= 68;
 }
@@ -1239,7 +1236,7 @@
 	.notifier_call = inetdev_event,
 };
 
-static inline size_t inet_nlmsg_size(void)
+static size_t inet_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
 	       + nla_total_size(4) /* IFA_ADDRESS */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c43ae3f..acdee32 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -218,7 +218,7 @@
 	scope = RT_SCOPE_UNIVERSE;
 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
 		fl4.flowi4_oif = 0;
-		fl4.flowi4_iif = net->loopback_dev->ifindex;
+		fl4.flowi4_iif = LOOPBACK_IFINDEX;
 		fl4.daddr = ip_hdr(skb)->saddr;
 		fl4.saddr = 0;
 		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
@@ -1041,7 +1041,7 @@
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
-	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct in_device *in_dev;
 	struct net *net = dev_net(dev);
 
 	if (event == NETDEV_UNREGISTER) {
@@ -1050,8 +1050,7 @@
 		return NOTIFY_DONE;
 	}
 
-	if (!in_dev)
-		return NOTIFY_DONE;
+	in_dev = __in_dev_get_rtnl(dev);
 
 	switch (event) {
 	case NETDEV_UP:
@@ -1062,16 +1061,14 @@
 		fib_sync_up(dev);
 #endif
 		atomic_inc(&net->ipv4.dev_addr_genid);
-		rt_cache_flush(dev_net(dev), -1);
+		rt_cache_flush(net, -1);
 		break;
 	case NETDEV_DOWN:
 		fib_disable_ip(dev, 0, 0);
 		break;
 	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGE:
-		rt_cache_flush(dev_net(dev), 0);
-		break;
-	case NETDEV_UNREGISTER_BATCH:
+		rt_cache_flush(net, 0);
 		break;
 	}
 	return NOTIFY_DONE;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f0cdb30..3c820da 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -367,7 +367,7 @@
 
 static inline void free_leaf(struct leaf *l)
 {
-	call_rcu_bh(&l->rcu, __leaf_free_rcu);
+	call_rcu(&l->rcu, __leaf_free_rcu);
 }
 
 static inline void free_leaf_info(struct leaf_info *leaf)
@@ -1550,7 +1550,8 @@
 		 * state.directly.
 		 */
 		if (pref_mismatch) {
-			int mp = KEYLENGTH - fls(pref_mismatch);
+			/* fls(x) = __fls(x) + 1 */
+			int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
 
 			if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
 				goto backtrace;
@@ -1655,7 +1656,12 @@
 	if (!l)
 		return -ESRCH;
 
-	fa_head = get_fa_head(l, plen);
+	li = find_leaf_info(l, plen);
+
+	if (!li)
+		return -ESRCH;
+
+	fa_head = &li->falh;
 	fa = fib_find_alias(fa_head, tos, 0);
 
 	if (!fa)
@@ -1691,9 +1697,6 @@
 	rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
 		  &cfg->fc_nlinfo, 0);
 
-	l = fib_find_node(t, key);
-	li = find_leaf_info(l, plen);
-
 	list_del_rcu(&fa->fa_list);
 
 	if (!plen)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6699f23..0b5580c 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2435,6 +2435,8 @@
 		struct ip_mc_list *im = (struct ip_mc_list *)v;
 		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
 		char   *querier;
+		long delta;
+
 #ifdef CONFIG_IP_MULTICAST
 		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
 			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
@@ -2448,11 +2450,12 @@
 				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
 		}
 
+		delta = im->timer.expires - jiffies;
 		seq_printf(seq,
 			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
 			   im->multiaddr, im->users,
-			   im->tm_running, im->tm_running ?
-			   jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
+			   im->tm_running,
+			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
 			   im->reporter);
 	}
 	return 0;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index db0cf17..7f75f21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -404,12 +404,15 @@
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct inet_sock *newinet = inet_sk(newsk);
-	struct ip_options_rcu *opt = ireq->opt;
+	struct ip_options_rcu *opt;
 	struct net *net = sock_net(sk);
 	struct flowi4 *fl4;
 	struct rtable *rt;
 
 	fl4 = &newinet->cork.fl.u.ip4;
+
+	rcu_read_lock();
+	opt = rcu_dereference(newinet->inet_opt);
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -421,11 +424,13 @@
 		goto no_route;
 	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
 		goto route_err;
+	rcu_read_unlock();
 	return &rt->dst;
 
 route_err:
 	ip_rt_put(rt);
 no_route:
+	rcu_read_unlock();
 	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52..c196d74 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-	if (neigh) {
+	if (!IS_ERR(neigh)) {
 		int res = dst_neigh_output(dst, neigh, skb);
 
 		rcu_read_unlock_bh();
@@ -1338,10 +1338,10 @@
 	iph->ihl = 5;
 	iph->tos = inet->tos;
 	iph->frag_off = df;
-	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	ip_copy_addrs(iph, fl4);
+	ip_select_ident(iph, &rt->dst, sk);
 
 	if (opt) {
 		iph->ihl += opt->optlen>>2;
@@ -1366,9 +1366,8 @@
 	return skb;
 }
 
-int ip_send_skb(struct sk_buff *skb)
+int ip_send_skb(struct net *net, struct sk_buff *skb)
 {
-	struct net *net = sock_net(skb->sk);
 	int err;
 
 	err = ip_local_out(skb);
@@ -1391,7 +1390,7 @@
 		return 0;
 
 	/* Netfilter gets the whole, unfragmented skb. */
-	return ip_send_skb(skb);
+	return ip_send_skb(sock_net(sk), skb);
 }
 
 /*
@@ -1536,6 +1535,7 @@
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
+		skb_orphan(nskb);
 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4..3a57570 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1798,7 +1798,7 @@
 		.flowi4_oif = (rt_is_output_route(rt) ?
 			       skb->dev->ifindex : 0),
 		.flowi4_iif = (rt_is_output_route(rt) ?
-			       net->loopback_dev->ifindex :
+			       LOOPBACK_IFINDEX :
 			       skb->dev->ifindex),
 		.flowi4_mark = skb->mark,
 	};
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 31371be..c301300 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -85,7 +85,7 @@
 			return ipv4_is_local_multicast(iph->daddr) ^ invert;
 		flow.flowi4_iif = 0;
 	} else {
-		flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+		flow.flowi4_iif = LOOPBACK_IFINDEX;
 	}
 
 	flow.daddr = iph->saddr;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 851acec8..6b3da5c 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -69,9 +69,7 @@
 	net->ipv4.iptable_filter =
 		ipt_register_table(net, &packet_filter, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv4.iptable_filter))
-		return PTR_ERR(net->ipv4.iptable_filter);
-	return 0;
+	return PTR_RET(net->ipv4.iptable_filter);
 }
 
 static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -96,14 +94,10 @@
 	filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
 	if (IS_ERR(filter_ops)) {
 		ret = PTR_ERR(filter_ops);
-		goto cleanup_table;
+		unregister_pernet_subsys(&iptable_filter_net_ops);
 	}
 
 	return ret;
-
- cleanup_table:
-	unregister_pernet_subsys(&iptable_filter_net_ops);
-	return ret;
 }
 
 static void __exit iptable_filter_fini(void)
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index aef5d1f..85d88f2 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -104,9 +104,7 @@
 	net->ipv4.iptable_mangle =
 		ipt_register_table(net, &packet_mangler, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv4.iptable_mangle))
-		return PTR_ERR(net->ipv4.iptable_mangle);
-	return 0;
+	return PTR_RET(net->ipv4.iptable_mangle);
 }
 
 static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -131,14 +129,10 @@
 	mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
 	if (IS_ERR(mangle_ops)) {
 		ret = PTR_ERR(mangle_ops);
-		goto cleanup_table;
+		unregister_pernet_subsys(&iptable_mangle_net_ops);
 	}
 
 	return ret;
-
- cleanup_table:
-	unregister_pernet_subsys(&iptable_mangle_net_ops);
-	return ret;
 }
 
 static void __exit iptable_mangle_fini(void)
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 07fb710..03d9696 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -48,9 +48,7 @@
 	net->ipv4.iptable_raw =
 		ipt_register_table(net, &packet_raw, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv4.iptable_raw))
-		return PTR_ERR(net->ipv4.iptable_raw);
-	return 0;
+	return PTR_RET(net->ipv4.iptable_raw);
 }
 
 static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -75,14 +73,10 @@
 	rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
 	if (IS_ERR(rawtable_ops)) {
 		ret = PTR_ERR(rawtable_ops);
-		goto cleanup_table;
+		unregister_pernet_subsys(&iptable_raw_net_ops);
 	}
 
 	return ret;
-
- cleanup_table:
-	unregister_pernet_subsys(&iptable_raw_net_ops);
-	return ret;
 }
 
 static void __exit iptable_raw_fini(void)
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index be45bdc..b283d8e 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -66,10 +66,7 @@
 	net->ipv4.iptable_security =
 		ipt_register_table(net, &security_table, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv4.iptable_security))
-		return PTR_ERR(net->ipv4.iptable_security);
-
-	return 0;
+	return PTR_RET(net->ipv4.iptable_security);
 }
 
 static void __net_exit iptable_security_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index ea4a2381..4ad9cf1 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -148,7 +148,7 @@
 	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
 				    hdr, NULL, &matchoff, &matchlen,
 				    &addr, &port) > 0) {
-		unsigned int matchend, poff, plen, buflen, n;
+		unsigned int olen, matchend, poff, plen, buflen, n;
 		char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
 
 		/* We're only interested in headers related to this
@@ -163,17 +163,18 @@
 				goto next;
 		}
 
+		olen = *datalen;
 		if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
 			      &addr, port))
 			return NF_DROP;
 
-		matchend = matchoff + matchlen;
+		matchend = matchoff + matchlen + *datalen - olen;
 
 		/* The maddr= parameter (RFC 3261) specifies where to send
 		 * the reply. */
 		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
 					       "maddr=", &poff, &plen,
-					       &addr) > 0 &&
+					       &addr, true) > 0 &&
 		    addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
 		    addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
 			buflen = sprintf(buffer, "%pI4",
@@ -187,7 +188,7 @@
 		 * from which the server received the request. */
 		if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
 					       "received=", &poff, &plen,
-					       &addr) > 0 &&
+					       &addr, false) > 0 &&
 		    addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
 		    addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
 			buflen = sprintf(buffer, "%pI4",
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c035251..50f6d3a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
@@ -80,7 +79,6 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
-#include <linux/workqueue.h>
 #include <linux/skbuff.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
@@ -88,11 +86,9 @@
 #include <linux/mroute.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -1591,11 +1587,14 @@
 	if (ipv4_is_zeronet(daddr))
 		goto martian_destination;
 
-	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
-		if (ipv4_is_loopback(daddr))
+	/* The following code avoids calling IN_DEV_NET_ROUTE_LOCALNET() unless
+	 * daddr and/or saddr is a loopback address, and then calls it only once.
+	 */
+	if (ipv4_is_loopback(daddr)) {
+		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
 			goto martian_destination;
-
-		if (ipv4_is_loopback(saddr))
+	} else if (ipv4_is_loopback(saddr)) {
+		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
 			goto martian_source;
 	}
 
@@ -1620,7 +1619,7 @@
 
 	if (res.type == RTN_LOCAL) {
 		err = fib_validate_source(skb, saddr, daddr, tos,
-					  net->loopback_dev->ifindex,
+					  LOOPBACK_IFINDEX,
 					  dev, in_dev, &itag);
 		if (err < 0)
 			goto martian_source_keep_err;
@@ -1896,7 +1895,7 @@
 
 	orig_oif = fl4->flowi4_oif;
 
-	fl4->flowi4_iif = net->loopback_dev->ifindex;
+	fl4->flowi4_iif = LOOPBACK_IFINDEX;
 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
 			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -1985,7 +1984,7 @@
 		if (!fl4->daddr)
 			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
 		dev_out = net->loopback_dev;
-		fl4->flowi4_oif = net->loopback_dev->ifindex;
+		fl4->flowi4_oif = LOOPBACK_IFINDEX;
 		res.type = RTN_LOCAL;
 		flags |= RTCF_LOCAL;
 		goto make_route;
@@ -2032,7 +2031,6 @@
 		}
 		dev_out = net->loopback_dev;
 		fl4->flowi4_oif = dev_out->ifindex;
-		res.fi = NULL;
 		flags |= RTCF_LOCAL;
 		goto make_route;
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eea..2109ff4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@
 			   old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			tp->xmit_size_goal_segs =
+				min_t(u16, xmit_size_goal / mss_now,
+				      sk->sk_gso_max_segs);
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16..1432cdb 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left * tp->mss_cache < sk->sk_gso_max_size)
+	    left * tp->mss_cache < sk->sk_gso_max_size &&
+	    left < sk->sk_gso_max_segs)
 		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9..bcfccc5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -237,7 +237,11 @@
 			tcp_enter_quickack_mode((struct sock *)tp);
 		break;
 	case INET_ECN_CE:
-		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+			/* Better not delay acks, sender can have a very low cwnd */
+			tcp_enter_quickack_mode((struct sock *)tp);
+			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+		}
 		/* fall through */
 	default:
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -5392,6 +5396,8 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if (unlikely(sk->sk_rx_dst == NULL))
+		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
 	 *	The code loosely follows the one in the famous
@@ -5605,7 +5611,7 @@
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
 	if (skb != NULL) {
-		inet_sk_rx_dst_set(sk, skb);
+		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
 
@@ -5740,7 +5746,7 @@
 
 		TCP_ECN_rcv_synack(tp, th);
 
-		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
+		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 		tcp_ack(sk, skb, FLAG_SLOWPATH);
 
 		/* Ok.. it's good. Set up sequence numbers and
@@ -5753,7 +5759,6 @@
 		 * never scaled.
 		 */
 		tp->snd_wnd = ntohs(th->window);
-		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
 		if (!tp->rx_opt.wscale_ok) {
 			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 642be8a..36f02f9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -417,10 +417,12 @@
 
 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 			tp->mtu_info = info;
-			if (!sock_owned_by_user(sk))
+			if (!sock_owned_by_user(sk)) {
 				tcp_v4_mtu_reduced(sk);
-			else
-				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
+			} else {
+				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+					sock_hold(sk);
+			}
 			goto out;
 		}
 
@@ -1462,6 +1464,7 @@
 		goto exit_nonewsk;
 
 	newsk->sk_gso_type = SKB_GSO_TCPV4;
+	inet_sk_rx_dst_set(newsk, skb);
 
 	newtp		      = tcp_sk(newsk);
 	newinet		      = inet_sk(newsk);
@@ -1627,9 +1630,6 @@
 				sk->sk_rx_dst = NULL;
 			}
 		}
-		if (unlikely(sk->sk_rx_dst == NULL))
-			inet_sk_rx_dst_set(sk, skb);
-
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1872,10 +1872,21 @@
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
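+/* Cache the input dst and the interface it arrived on in the socket, so the
+ * receive path can reuse the route for this established connection.
+ */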
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+EXPORT_SYMBOL(inet_sk_rx_dst_set);
+
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v4_conn_request,
 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
@@ -2385,7 +2396,7 @@
 			 struct seq_file *f, int i, kuid_t uid, int *len)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
-	int ttd = req->expires - jiffies;
+	long delta = req->expires - jiffies;
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
@@ -2397,7 +2408,7 @@
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
-		jiffies_to_clock_t(ttd),
+		jiffies_delta_to_clock_t(delta),
 		req->retrans,
 		from_kuid_munged(seq_user_ns(f), uid),
 		0,  /* non standard timer */
@@ -2448,7 +2459,7 @@
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
-		jiffies_to_clock_t(timer_expires - jiffies),
+		jiffies_delta_to_clock_t(timer_expires - jiffies),
 		icsk->icsk_retransmits,
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
 		icsk->icsk_probes_out,
@@ -2467,10 +2478,7 @@
 {
 	__be32 dest, src;
 	__u16 destp, srcp;
-	int ttd = tw->tw_ttd - jiffies;
-
-	if (ttd < 0)
-		ttd = 0;
+	long delta = tw->tw_ttd - jiffies;
 
 	dest  = tw->tw_daddr;
 	src   = tw->tw_rcv_saddr;
@@ -2480,7 +2488,7 @@
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
 		atomic_read(&tw->tw_refcnt), tw, len);
 }
 
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2288a63..0abe67b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -731,6 +731,18 @@
 
 static void __net_exit tcp_net_metrics_exit(struct net *net)
 {
+	unsigned int i;
+
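+	/* Free every cached entry; the namespace is going away, so no
+	 * concurrent lookups can race with us here.
+	 */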
+	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
+		struct tcp_metrics_block *tm, *next;
+
+		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
+		while (tm) {
+			next = rcu_dereference_protected(tm->tcpm_next, 1);
+			kfree(tm);
+			tm = next;
+		}
+	}
 	kfree(net->ipv4.tcp_metrics_hash);
 }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c..6ff7f10 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,8 +387,6 @@
 		struct tcp_sock *oldtp = tcp_sk(sk);
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
 
-		inet_sk_rx_dst_set(newsk, skb);
-
 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection.  There is no need to
 		 * copy any s_data_payload stored at the original socket.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff..d046326 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -910,14 +910,18 @@
 	if (flags & (1UL << TCP_TSQ_DEFERRED))
 		tcp_tsq_handler(sk);
 
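+	/* Each deferred flag below is set together with a sock_hold(), so the
+	 * matching __sock_put() is done once its handler has run.
+	 */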
-	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
 		tcp_write_timer_handler(sk);
-
-	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
 		tcp_delack_timer_handler(sk);
-
-	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
 		sk->sk_prot->mtu_reduced(sk);
+		__sock_put(sk);
+	}
 }
 EXPORT_SYMBOL(tcp_release_cb);
 
@@ -940,7 +944,7 @@
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1526,21 @@
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1769,8 @@
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2004,9 @@
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6df36ad..b774a03 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -252,7 +252,8 @@
 		inet_csk(sk)->icsk_ack.blocked = 1;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* delegate our work to tcp_release_cb() */
-		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -481,7 +482,8 @@
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 53b8981..c4e6432 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -758,7 +758,7 @@
 		uh->check = CSUM_MANGLED_0;
 
 send:
-	err = ip_send_skb(skb);
+	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
 			UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 5728695..4f7fe72 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -201,6 +201,22 @@
 
 	  If unsure, say N.
 
+config IPV6_GRE
+	tristate "IPv6: GRE tunnel"
+	select IPV6_TUNNEL
+	---help---
+	  Tunneling means encapsulating data of one protocol type within
+	  another protocol and sending it over a channel that understands the
+	  encapsulating protocol. This particular tunneling driver implements
+	  GRE (Generic Routing Encapsulation) and at this time allows
+	  encapsulation of IPv4 or IPv6 over an existing IPv6 infrastructure.
+	  This driver is useful if the other endpoint is a Cisco router: Cisco
+	  likes GRE much better than the other Linux tunneling driver ("IP
+	  tunneling" above). In addition, GRE allows multicast redistribution
+	  through the tunnel.
+
+	  Saying M here will produce a module called ip6_gre. If unsure, say N.
+
 config IPV6_MULTIPLE_TABLES
 	bool "IPv6: Multiple Routing Tables"
 	depends on EXPERIMENTAL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 686934a..b6d3f79 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -36,6 +36,7 @@
 
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
 
 obj-y += addrconf_core.o exthdrs_core.o
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 79181819..6bc85f7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -494,8 +494,7 @@
 	struct net_device *dev;
 	struct inet6_dev *idev;
 
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
+	for_each_netdev(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -504,7 +503,6 @@
 				dev_forward_change(idev);
 		}
 	}
-	rcu_read_unlock();
 }
 
 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644
index 0000000..424d11a
--- /dev/null
+++ b/net/ipv6/ip6_gre.c
@@ -0,0 +1,1792 @@
+/*
+ *	GRE over IPv6 protocol decoder.
+ *
+ *	Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/hash.h>
+#include <linux/if_tunnel.h>
+#include <linux/ip6_tunnel.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/protocol.h>
+#include <net/addrconf.h>
+#include <net/arp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+
+
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static int ip6gre_net_id __read_mostly;
+struct ip6gre_net {
+	struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
+
+	struct net_device *fb_tunnel_dev;
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static int ip6gre_tunnel_init(struct net_device *dev);
+static void ip6gre_tunnel_setup(struct net_device *dev);
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+
+/* Tunnel hash table */
+
+/*
+   4 hash tables:
+
+   3: (remote,local)
+   2: (remote,*)
+   1: (*,local)
+   0: (*,*)
+
+   We require an exact key match, i.e. if a key is present in the packet
+   it will match only a tunnel with the same key; if it is not present,
+   it will match only a keyless tunnel.
+
+   All keyless packets that do not match a configured keyless tunnel
+   will match the fallback tunnel.
+ */
+
+#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
+static u32 HASH_ADDR(const struct in6_addr *addr)
+{
+	u32 hash = ipv6_addr_hash(addr);
+
+	return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+#define tunnels_r_l	tunnels[3]
+#define tunnels_r	tunnels[2]
+#define tunnels_l	tunnels[1]
+#define tunnels_wc	tunnels[0]
+/*
+ * Locking: hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often-modified stats are per-cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
+
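+/* Fold the per-cpu counters into one rtnl_link_stats64 and copy over the
+ * shared error counters kept in netdev->stats.
+ */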
+static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
+		struct rtnl_link_stats64 *tot)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
+	}
+
+	tot->multicast = dev->stats.multicast;
+	tot->rx_crc_errors = dev->stats.rx_crc_errors;
+	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+	tot->rx_length_errors = dev->stats.rx_length_errors;
+	tot->rx_errors = dev->stats.rx_errors;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+
+	return tot;
+}
+
+/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
+
+static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+		const struct in6_addr *remote, const struct in6_addr *local,
+		__be32 key, __be16 gre_proto)
+{
+	struct net *net = dev_net(dev);
+	int link = dev->ifindex;
+	unsigned int h0 = HASH_ADDR(remote);
+	unsigned int h1 = HASH_KEY(key);
+	struct ip6_tnl *t, *cand = NULL;
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+		       ARPHRD_ETHER : ARPHRD_IP6GRE;
+	int score, cand_score = 4;
+
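+	/* Scan the tables from most to least specific.  score bit 0 marks a
+	 * link mismatch, bit 1 a device type mismatch; an exact match (score 0)
+	 * wins immediately, otherwise the lowest-scoring candidate is kept.
+	 */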
+	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
+		    key != t->parms.i_key ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (t->dev->type != ARPHRD_IP6GRE &&
+		    t->dev->type != dev_type)
+			continue;
+
+		score = 0;
+		if (t->parms.link != link)
+			score |= 1;
+		if (t->dev->type != dev_type)
+			score |= 2;
+		if (score == 0)
+			return t;
+
+		if (score < cand_score) {
+			cand = t;
+			cand_score = score;
+		}
+	}
+
+	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+		    key != t->parms.i_key ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (t->dev->type != ARPHRD_IP6GRE &&
+		    t->dev->type != dev_type)
+			continue;
+
+		score = 0;
+		if (t->parms.link != link)
+			score |= 1;
+		if (t->dev->type != dev_type)
+			score |= 2;
+		if (score == 0)
+			return t;
+
+		if (score < cand_score) {
+			cand = t;
+			cand_score = score;
+		}
+	}
+
+	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
+			  (!ipv6_addr_equal(local, &t->parms.raddr) ||
+				 !ipv6_addr_is_multicast(local))) ||
+		    key != t->parms.i_key ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (t->dev->type != ARPHRD_IP6GRE &&
+		    t->dev->type != dev_type)
+			continue;
+
+		score = 0;
+		if (t->parms.link != link)
+			score |= 1;
+		if (t->dev->type != dev_type)
+			score |= 2;
+		if (score == 0)
+			return t;
+
+		if (score < cand_score) {
+			cand = t;
+			cand_score = score;
+		}
+	}
+
+	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+		if (t->parms.i_key != key ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (t->dev->type != ARPHRD_IP6GRE &&
+		    t->dev->type != dev_type)
+			continue;
+
+		score = 0;
+		if (t->parms.link != link)
+			score |= 1;
+		if (t->dev->type != dev_type)
+			score |= 2;
+		if (score == 0)
+			return t;
+
+		if (score < cand_score) {
+			cand = t;
+			cand_score = score;
+		}
+	}
+
+	if (cand != NULL)
+		return cand;
+
+	dev = ign->fb_tunnel_dev;
+	if (dev->flags & IFF_UP)
+		return netdev_priv(dev);
+
+	return NULL;
+}
+
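+/* Select the hash bucket for a tunnel: the table index gets bit 0 when a
+ * local address is set and bit 1 when a unicast remote address is set; the
+ * slot is derived from the key, xor'ed with the remote address hash.
+ */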
+static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+		const struct __ip6_tnl_parm *p)
+{
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
+	unsigned int h = HASH_KEY(p->i_key);
+	int prio = 0;
+
+	if (!ipv6_addr_any(local))
+		prio |= 1;
+	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
+		prio |= 2;
+		h ^= HASH_ADDR(remote);
+	}
+
+	return &ign->tunnels[prio][h];
+}
+
+static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+		const struct ip6_tnl *t)
+{
+	return __ip6gre_bucket(ign, &t->parms);
+}
+
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+	rcu_assign_pointer(*tp, t);
+}
+
+static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	struct ip6_tnl __rcu **tp;
+	struct ip6_tnl *iter;
+
+	for (tp = ip6gre_bucket(ign, t);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
+			break;
+		}
+	}
+}
+
+static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
+					   const struct __ip6_tnl_parm *parms,
+					   int type)
+{
+	const struct in6_addr *remote = &parms->raddr;
+	const struct in6_addr *local = &parms->laddr;
+	__be32 key = parms->i_key;
+	int link = parms->link;
+	struct ip6_tnl *t;
+	struct ip6_tnl __rcu **tp;
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+	for (tp = __ip6gre_bucket(ign, parms);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next)
+		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    key == t->parms.i_key &&
+		    link == t->parms.link &&
+		    type == t->dev->type)
+			break;
+
+	return t;
+}
+
+static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+		const struct __ip6_tnl_parm *parms, int create)
+{
+	struct ip6_tnl *t, *nt;
+	struct net_device *dev;
+	char name[IFNAMSIZ];
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+	if (t || !create)
+		return t;
+
+	if (parms->name[0])
+		strlcpy(name, parms->name, IFNAMSIZ);
+	else
+		strcpy(name, "ip6gre%d");
+
+	dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+	if (!dev)
+		return NULL;
+
+	dev_net_set(dev, net);
+
+	nt = netdev_priv(dev);
+	nt->parms = *parms;
+	dev->rtnl_link_ops = &ip6gre_link_ops;
+
+	nt->dev = dev;
+	ip6gre_tnl_link_config(nt, 1);
+
+	if (register_netdevice(dev) < 0)
+		goto failed_free;
+
+	/* Can use a lockless transmit, unless we generate output sequences */
+	if (!(nt->parms.o_flags & GRE_SEQ))
+		dev->features |= NETIF_F_LLTX;
+
+	dev_hold(dev);
+	ip6gre_tunnel_link(ign, nt);
+	return nt;
+
+failed_free:
+	free_netdev(dev);
+	return NULL;
+}
+
+static void ip6gre_tunnel_uninit(struct net_device *dev)
+{
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+	dev_put(dev);
+}
+
+
+static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		u8 type, u8 code, int offset, __be32 info)
+{
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+	__be16 *p = (__be16 *)(skb->data + offset);
+	int grehlen = offset + 4;
+	struct ip6_tnl *t;
+	__be16 flags;
+
+	flags = p[0];
+	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+		if (flags&(GRE_VERSION|GRE_ROUTING))
+			return;
+		if (flags&GRE_KEY) {
+			grehlen += 4;
+			if (flags&GRE_CSUM)
+				grehlen += 4;
+		}
+	}
+
+	/* If only 8 bytes were returned, a keyed message will be dropped here */
+	if (!pskb_may_pull(skb, grehlen))
+		return;
+	ipv6h = (const struct ipv6hdr *)skb->data;
+	p = (__be16 *)(skb->data + offset);
+
+	rcu_read_lock();
+
+	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+				flags & GRE_KEY ?
+				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+				p[1]);
+	if (t == NULL)
+		goto out;
+
+	switch (type) {
+		__u32 teli;
+		struct ipv6_tlv_tnl_enc_lim *tel;
+		__u32 mtu;
+	case ICMPV6_DEST_UNREACH:
+		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+				     t->parms.name);
+		break;
+	case ICMPV6_TIME_EXCEED:
+		if (code == ICMPV6_EXC_HOPLIMIT) {
+			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+					     t->parms.name);
+		}
+		break;
+	case ICMPV6_PARAMPROB:
+		teli = 0;
+		if (code == ICMPV6_HDR_FIELD)
+			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
+
+		if (teli && teli == info - 2) {
+			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
+			if (tel->encap_limit == 0) {
+				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+						     t->parms.name);
+			}
+		} else {
+			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+					     t->parms.name);
+		}
+		break;
+	case ICMPV6_PKT_TOOBIG:
+		mtu = info - offset;
+		if (mtu < IPV6_MIN_MTU)
+			mtu = IPV6_MIN_MTU;
+		t->dev->mtu = mtu;
+		break;
+	}
+
+	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
+		t->err_count++;
+	else
+		t->err_count = 1;
+	t->err_time = jiffies;
+out:
+	rcu_read_unlock();
+}
+
+static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
+		const struct ipv6hdr *ipv6h, struct sk_buff *skb)
+{
+	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
+
+	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
+
+	if (INET_ECN_is_ce(dsfield))
+		IP_ECN_set_ce(ip_hdr(skb));
+}
+
+static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
+		const struct ipv6hdr *ipv6h, struct sk_buff *skb)
+{
+	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
+
+	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
+		IP6_ECN_set_ce(ipv6_hdr(skb));
+}
+
+static int ip6gre_rcv(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	u8     *h;
+	__be16    flags;
+	__sum16   csum = 0;
+	__be32 key = 0;
+	u32    seqno = 0;
+	struct ip6_tnl *tunnel;
+	int    offset = 4;
+	__be16 gre_proto;
+
+	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+		goto drop_nolock;
+
+	ipv6h = ipv6_hdr(skb);
+	h = skb->data;
+	flags = *(__be16 *)h;
+
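+	/* Optional GRE fields follow the 4 byte base header in this order:
+	 * checksum, key, sequence number.
+	 */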
+	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
+		/* - Version must be 0.
+		   - We do not support routing headers.
+		 */
+		if (flags&(GRE_VERSION|GRE_ROUTING))
+			goto drop_nolock;
+
+		if (flags&GRE_CSUM) {
+			switch (skb->ip_summed) {
+			case CHECKSUM_COMPLETE:
+				csum = csum_fold(skb->csum);
+				if (!csum)
+					break;
+				/* fall through */
+			case CHECKSUM_NONE:
+				skb->csum = 0;
+				csum = __skb_checksum_complete(skb);
+				skb->ip_summed = CHECKSUM_COMPLETE;
+			}
+			offset += 4;
+		}
+		if (flags&GRE_KEY) {
+			key = *(__be32 *)(h + offset);
+			offset += 4;
+		}
+		if (flags&GRE_SEQ) {
+			seqno = ntohl(*(__be32 *)(h + offset));
+			offset += 4;
+		}
+	}
+
+	gre_proto = *(__be16 *)(h + 2);
+
+	rcu_read_lock();
+	tunnel = ip6gre_tunnel_lookup(skb->dev,
+					  &ipv6h->saddr, &ipv6h->daddr, key,
+					  gre_proto);
+	if (tunnel) {
+		struct pcpu_tstats *tstats;
+
+		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+
+		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
+			tunnel->dev->stats.rx_dropped++;
+			goto drop;
+		}
+
+		secpath_reset(skb);
+
+		skb->protocol = gre_proto;
+		/* WCCP version 1 and 2 protocol decoding.
+		 * - Change protocol to IP
+		 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
+		 */
+		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
+			skb->protocol = htons(ETH_P_IP);
+			if ((*(h + offset) & 0xF0) != 0x40)
+				offset += 4;
+		}
+
+		skb->mac_header = skb->network_header;
+		__pskb_pull(skb, offset);
+		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
+		skb->pkt_type = PACKET_HOST;
+
+		if (((flags&GRE_CSUM) && csum) ||
+		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
+			tunnel->dev->stats.rx_crc_errors++;
+			tunnel->dev->stats.rx_errors++;
+			goto drop;
+		}
+		if (tunnel->parms.i_flags&GRE_SEQ) {
+			if (!(flags&GRE_SEQ) ||
+			    (tunnel->i_seqno &&
+					(s32)(seqno - tunnel->i_seqno) < 0)) {
+				tunnel->dev->stats.rx_fifo_errors++;
+				tunnel->dev->stats.rx_errors++;
+				goto drop;
+			}
+			tunnel->i_seqno = seqno + 1;
+		}
+
+		/* Warning: All skb pointers will be invalidated! */
+		if (tunnel->dev->type == ARPHRD_ETHER) {
+			if (!pskb_may_pull(skb, ETH_HLEN)) {
+				tunnel->dev->stats.rx_length_errors++;
+				tunnel->dev->stats.rx_errors++;
+				goto drop;
+			}
+
+			ipv6h = ipv6_hdr(skb);
+			skb->protocol = eth_type_trans(skb, tunnel->dev);
+			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+		}
+
+		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->rx_packets++;
+		tstats->rx_bytes += skb->len;
+		u64_stats_update_end(&tstats->syncp);
+
+		__skb_tunnel_rx(skb, tunnel->dev);
+
+		skb_reset_network_header(skb);
+		if (skb->protocol == htons(ETH_P_IP))
+			ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
+		else if (skb->protocol == htons(ETH_P_IPV6))
+			ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);
+
+		netif_rx(skb);
+
+		rcu_read_unlock();
+		return 0;
+	}
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+drop:
+	rcu_read_unlock();
+drop_nolock:
+	kfree_skb(skb);
+	return 0;
+}
+
+struct ipv6_tel_txoption {
+	struct ipv6_txoptions ops;
+	__u8 dst_opt[8];
+};
+
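+/* Build a destination options header carrying the RFC 2473 tunnel
+ * encapsulation limit option, padded with PadN to 8 bytes.
+ */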
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+{
+	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
+
+	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
+	opt->dst_opt[3] = 1;
+	opt->dst_opt[4] = encap_limit;
+	opt->dst_opt[5] = IPV6_TLV_PADN;
+	opt->dst_opt[6] = 1;
+
+	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+	opt->ops.opt_nflen = 8;
+}
+
+static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
+			 struct net_device *dev,
+			 __u8 dsfield,
+			 struct flowi6 *fl6,
+			 int encap_limit,
+			 __u32 *pmtu)
+{
+	struct net *net = dev_net(dev);
+	struct ip6_tnl *tunnel = netdev_priv(dev);
+	struct net_device *tdev;    /* Device to other host */
+	struct ipv6hdr  *ipv6h;     /* Our new IP header */
+	unsigned int max_headroom;  /* The extra header space needed */
+	int    gre_hlen;
+	struct ipv6_tel_txoption opt;
+	int    mtu;
+	struct dst_entry *dst = NULL, *ndst = NULL;
+	struct net_device_stats *stats = &tunnel->dev->stats;
+	int err = -1;
+	u8 proto;
+	int pkt_len;
+	struct sk_buff *new_skb;
+
+	if (dev->type == ARPHRD_ETHER)
+		IPCB(skb)->flags = 0;
+
+	if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
+		gre_hlen = 0;
+		ipv6h = (struct ipv6hdr *)skb->data;
+		fl6->daddr = ipv6h->daddr;
+	} else {
+		gre_hlen = tunnel->hlen;
+		fl6->daddr = tunnel->parms.raddr;
+	}
+
+	if (!fl6->flowi6_mark)
+		dst = ip6_tnl_dst_check(tunnel);
+
+	if (!dst) {
+		ndst = ip6_route_output(net, NULL, fl6);
+
+		if (ndst->error)
+			goto tx_err_link_failure;
+		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
+			ndst = NULL;
+			goto tx_err_link_failure;
+		}
+		dst = ndst;
+	}
+
+	tdev = dst->dev;
+
+	if (tdev == dev) {
+		stats->collisions++;
+		net_warn_ratelimited("%s: Local routing loop detected!\n",
+				     tunnel->parms.name);
+		goto tx_err_dst_release;
+	}
+
+	mtu = dst_mtu(dst) - sizeof(*ipv6h);
+	if (encap_limit >= 0)
+		mtu -= 8;
+	if (mtu < IPV6_MIN_MTU)
+		mtu = IPV6_MIN_MTU;
+	if (skb_dst(skb))
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+	if (skb->len > mtu) {
+		*pmtu = mtu;
+		err = -EMSGSIZE;
+		goto tx_err_dst_release;
+	}
+
+	if (tunnel->err_count > 0) {
+		if (time_before(jiffies,
+				tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
+			tunnel->err_count--;
+
+			dst_link_failure(skb);
+		} else
+			tunnel->err_count = 0;
+	}
+
+	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+	if (encap_limit >= 0)
+		max_headroom += 8;	/* room for the encapsulation limit option header */
+
+	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
+	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+		new_skb = skb_realloc_headroom(skb, max_headroom);
+		if (max_headroom > dev->needed_headroom)
+			dev->needed_headroom = max_headroom;
+		if (!new_skb)
+			goto tx_err_dst_release;
+
+		if (skb->sk)
+			skb_set_owner_w(new_skb, skb->sk);
+		consume_skb(skb);
+		skb = new_skb;
+	}
+
+	skb_dst_drop(skb);
+
+	if (fl6->flowi6_mark) {
+		skb_dst_set(skb, dst);
+		ndst = NULL;
+	} else {
+		skb_dst_set_noref(skb, dst);
+	}
+
+	skb->transport_header = skb->network_header;
+
+	proto = NEXTHDR_GRE;
+	if (encap_limit >= 0) {
+		init_tel_txopt(&opt, encap_limit);
+		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+	}
+
+	skb_push(skb, gre_hlen);
+	skb_reset_network_header(skb);
+
+	/*
+	 *	Push down and install the IP header.
+	 */
+	ipv6h = ipv6_hdr(skb);
+	*(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
+	dsfield = INET_ECN_encapsulate(0, dsfield);
+	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+	ipv6h->hop_limit = tunnel->parms.hop_limit;
+	ipv6h->nexthdr = proto;
+	ipv6h->saddr = fl6->saddr;
+	ipv6h->daddr = fl6->daddr;
+
+	((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
+	((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
+				   htons(ETH_P_TEB) : skb->protocol;
+
+	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
+
+		if (tunnel->parms.o_flags&GRE_SEQ) {
+			++tunnel->o_seqno;
+			*ptr = htonl(tunnel->o_seqno);
+			ptr--;
+		}
+		if (tunnel->parms.o_flags&GRE_KEY) {
+			*ptr = tunnel->parms.o_key;
+			ptr--;
+		}
+		if (tunnel->parms.o_flags&GRE_CSUM) {
+			*ptr = 0;
+			*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
+				skb->len - sizeof(struct ipv6hdr));
+		}
+	}
+
+	nf_reset(skb);
+	pkt_len = skb->len;
+	err = ip6_local_out(skb);
+
+	if (net_xmit_eval(err) == 0) {
+		struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
+
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+	} else {
+		stats->tx_errors++;
+		stats->tx_aborted_errors++;
+	}
+
+	if (ndst)
+		ip6_tnl_dst_store(tunnel, ndst);
+
+	return 0;
+tx_err_link_failure:
+	stats->tx_carrier_errors++;
+	dst_link_failure(skb);
+tx_err_dst_release:
+	dst_release(ndst);
+	return err;
+}
+
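+/* Encapsulate an IPv4 frame; copies the inner TOS/fwmark as configured
+ * and reports ICMP_FRAG_NEEDED when the packet exceeds the tunnel MTU.
+ */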
+static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	const struct iphdr  *iph = ip_hdr(skb);
+	int encap_limit = -1;
+	struct flowi6 fl6;
+	__u8 dsfield;
+	__u32 mtu;
+	int err;
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		encap_limit = t->parms.encap_limit;
+
+	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+	fl6.flowi6_proto = IPPROTO_IPIP;
+
+	dsfield = ipv4_get_dsfield(iph);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+					  & IPV6_TCLASS_MASK;
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
+
+	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	if (err != 0) {
+		/* XXX: send ICMP error even if DF is not set. */
+		if (err == -EMSGSIZE)
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				  htonl(mtu));
+		return -1;
+	}
+
+	return 0;
+}
+
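+/* Encapsulate an IPv6 frame; honours an inner tunnel encapsulation-limit
+ * option and reports ICMPV6_PKT_TOOBIG when the packet exceeds the
+ * tunnel MTU.
+ */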
+static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	int encap_limit = -1;
+	__u16 offset;
+	struct flowi6 fl6;
+	__u8 dsfield;
+	__u32 mtu;
+	int err;
+
+	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+		return -1;
+
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	if (offset > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2);
+			return -1;
+		}
+		encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		encap_limit = t->parms.encap_limit;
+
+	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+	fl6.flowi6_proto = IPPROTO_IPV6;
+
+	dsfield = ipv6_get_dsfield(ipv6h);
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6.flowi6_mark = skb->mark;
+
+	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	if (err != 0) {
+		if (err == -EMSGSIZE)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 else
+ **/
+
+static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
+	const struct ipv6hdr *hdr)
+{
+	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	int encap_limit = -1;
+	struct flowi6 fl6;
+	__u32 mtu;
+	int err;
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		encap_limit = t->parms.encap_limit;
+
+	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+	fl6.flowi6_proto = skb->protocol;
+
+	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+
+	return err;
+}
+
+static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
+	struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net_device_stats *stats = &t->dev->stats;
+	int ret;
+
+	if (!ip6_tnl_xmit_ctl(t))
+		goto tx_err;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		ret = ip6gre_xmit_ipv4(skb, dev);
+		break;
+	case htons(ETH_P_IPV6):
+		ret = ip6gre_xmit_ipv6(skb, dev);
+		break;
+	default:
+		ret = ip6gre_xmit_other(skb, dev);
+		break;
+	}
+
+	if (ret < 0)
+		goto tx_err;
+
+	return NETDEV_TX_OK;
+
+tx_err:
+	stats->tx_errors++;
+	stats->tx_dropped++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
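+/* Refresh the cached flow template, device flags, header length and,
+ * optionally, the MTU from the current tunnel parameters.
+ */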
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+	struct net_device *dev = t->dev;
+	struct __ip6_tnl_parm *p = &t->parms;
+	struct flowi6 *fl6 = &t->fl.u.ip6;
+	int addend = sizeof(struct ipv6hdr) + 4;
+
+	if (dev->type != ARPHRD_ETHER) {
+		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+	}
+
+	/* Set up flowi template */
+	fl6->saddr = p->laddr;
+	fl6->daddr = p->raddr;
+	fl6->flowi6_oif = p->link;
+	fl6->flowlabel = 0;
+
+	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
+		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
+		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+
+	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+	if (p->flags&IP6_TNL_F_CAP_XMIT &&
+			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
+		dev->flags |= IFF_POINTOPOINT;
+	else
+		dev->flags &= ~IFF_POINTOPOINT;
+
+	dev->iflink = p->link;
+
+	/* Precalculate GRE options length */
+	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
+		if (t->parms.o_flags&GRE_CSUM)
+			addend += 4;
+		if (t->parms.o_flags&GRE_KEY)
+			addend += 4;
+		if (t->parms.o_flags&GRE_SEQ)
+			addend += 4;
+	}
+
+	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+		int strict = (ipv6_addr_type(&p->raddr) &
+			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
+
+		struct rt6_info *rt = rt6_lookup(dev_net(dev),
+						 &p->raddr, &p->laddr,
+						 p->link, strict);
+
+		if (rt == NULL)
+			return;
+
+		if (rt->dst.dev) {
+			dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+
+			if (set_mtu) {
+				dev->mtu = rt->dst.dev->mtu - addend;
+				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+					dev->mtu -= 8;
+
+				if (dev->mtu < IPV6_MIN_MTU)
+					dev->mtu = IPV6_MIN_MTU;
+			}
+		}
+		dst_release(&rt->dst);
+	}
+
+	t->hlen = addend;
+}
+
+static int ip6gre_tnl_change(struct ip6_tnl *t,
+	const struct __ip6_tnl_parm *p, int set_mtu)
+{
+	t->parms.laddr = p->laddr;
+	t->parms.raddr = p->raddr;
+	t->parms.flags = p->flags;
+	t->parms.hop_limit = p->hop_limit;
+	t->parms.encap_limit = p->encap_limit;
+	t->parms.flowinfo = p->flowinfo;
+	t->parms.link = p->link;
+	t->parms.proto = p->proto;
+	t->parms.i_key = p->i_key;
+	t->parms.o_key = p->o_key;
+	t->parms.i_flags = p->i_flags;
+	t->parms.o_flags = p->o_flags;
+	ip6_tnl_dst_reset(t);
+	ip6gre_tnl_link_config(t, set_mtu);
+	return 0;
+}
+
+static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
+	const struct ip6_tnl_parm2 *u)
+{
+	p->laddr = u->laddr;
+	p->raddr = u->raddr;
+	p->flags = u->flags;
+	p->hop_limit = u->hop_limit;
+	p->encap_limit = u->encap_limit;
+	p->flowinfo = u->flowinfo;
+	p->link = u->link;
+	p->i_key = u->i_key;
+	p->o_key = u->o_key;
+	p->i_flags = u->i_flags;
+	p->o_flags = u->o_flags;
+	memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
+	const struct __ip6_tnl_parm *p)
+{
+	u->proto = IPPROTO_GRE;
+	u->laddr = p->laddr;
+	u->raddr = p->raddr;
+	u->flags = p->flags;
+	u->hop_limit = p->hop_limit;
+	u->encap_limit = p->encap_limit;
+	u->flowinfo = p->flowinfo;
+	u->link = p->link;
+	u->i_key = p->i_key;
+	u->o_key = p->o_key;
+	u->i_flags = p->i_flags;
+	u->o_flags = p->o_flags;
+	memcpy(u->name, p->name, sizeof(u->name));
+}
+
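+/* Legacy ioctl-based tunnel configuration (SIOCGETTUNNEL, SIOCADDTUNNEL,
+ * SIOCCHGTUNNEL and SIOCDELTUNNEL).
+ */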
+static int ip6gre_tunnel_ioctl(struct net_device *dev,
+	struct ifreq *ifr, int cmd)
+{
+	int err = 0;
+	struct ip6_tnl_parm2 p;
+	struct __ip6_tnl_parm p1;
+	struct ip6_tnl *t;
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+	switch (cmd) {
+	case SIOCGETTUNNEL:
+		t = NULL;
+		if (dev == ign->fb_tunnel_dev) {
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+				err = -EFAULT;
+				break;
+			}
+			ip6gre_tnl_parm_from_user(&p1, &p);
+			t = ip6gre_tunnel_locate(net, &p1, 0);
+		}
+		if (t == NULL)
+			t = netdev_priv(dev);
+		ip6gre_tnl_parm_to_user(&p, &t->parms);
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+			err = -EFAULT;
+		break;
+
+	case SIOCADDTUNNEL:
+	case SIOCCHGTUNNEL:
+		err = -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			goto done;
+
+		err = -EFAULT;
+		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+			goto done;
+
+		err = -EINVAL;
+		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
+			goto done;
+
+		if (!(p.i_flags&GRE_KEY))
+			p.i_key = 0;
+		if (!(p.o_flags&GRE_KEY))
+			p.o_key = 0;
+
+		ip6gre_tnl_parm_from_user(&p1, &p);
+		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
+
+		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+			if (t != NULL) {
+				if (t->dev != dev) {
+					err = -EEXIST;
+					break;
+				}
+			} else {
+				t = netdev_priv(dev);
+
+				ip6gre_tunnel_unlink(ign, t);
+				synchronize_net();
+				ip6gre_tnl_change(t, &p1, 1);
+				ip6gre_tunnel_link(ign, t);
+				netdev_state_change(dev);
+			}
+		}
+
+		if (t) {
+			err = 0;
+
+			ip6gre_tnl_parm_to_user(&p, &t->parms);
+			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+				err = -EFAULT;
+		} else
+			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+		break;
+
+	case SIOCDELTUNNEL:
+		err = -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			goto done;
+
+		if (dev == ign->fb_tunnel_dev) {
+			err = -EFAULT;
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+				goto done;
+			err = -ENOENT;
+			ip6gre_tnl_parm_from_user(&p1, &p);
+			t = ip6gre_tunnel_locate(net, &p1, 0);
+			if (t == NULL)
+				goto done;
+			err = -EPERM;
+			if (t == netdev_priv(ign->fb_tunnel_dev))
+				goto done;
+			dev = t->dev;
+		}
+		unregister_netdevice(dev);
+		err = 0;
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+
+done:
+	return err;
+}
+
+static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct ip6_tnl *tunnel = netdev_priv(dev);
+	if (new_mtu < 68 ||
+	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
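+/* header_ops->create handler, installed when the tunnel has no fixed
+ * remote address: pre-builds the outer IPv6 and GRE headers so the
+ * destination address can be supplied per packet.
+ */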
+static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+			unsigned short type,
+			const void *daddr, const void *saddr, unsigned int len)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+	__be16 *p = (__be16 *)(ipv6h+1);
+
+	*(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+	ipv6h->hop_limit = t->parms.hop_limit;
+	ipv6h->nexthdr = NEXTHDR_GRE;
+	ipv6h->saddr = t->parms.laddr;
+	ipv6h->daddr = t->parms.raddr;
+
+	p[0]		= t->parms.o_flags;
+	p[1]		= htons(type);
+
+	/*
+	 *	Set the source hardware address.
+	 */
+
+	if (saddr)
+		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
+	if (daddr)
+		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
+	if (!ipv6_addr_any(&ipv6h->daddr))
+		return t->hlen;
+
+	return -t->hlen;
+}
+
+static int ip6gre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb_mac_header(skb);
+	memcpy(haddr, &ipv6h->saddr, sizeof(struct in6_addr));
+	return sizeof(struct in6_addr);
+}
+
+static const struct header_ops ip6gre_header_ops = {
+	.create	= ip6gre_header,
+	.parse	= ip6gre_header_parse,
+};
+
+static const struct net_device_ops ip6gre_netdev_ops = {
+	.ndo_init		= ip6gre_tunnel_init,
+	.ndo_uninit		= ip6gre_tunnel_uninit,
+	.ndo_start_xmit		= ip6gre_tunnel_xmit,
+	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
+	.ndo_change_mtu		= ip6gre_tunnel_change_mtu,
+	.ndo_get_stats64	= ip6gre_get_stats64,
+};
+
+static void ip6gre_dev_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
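+/* Initialise net_device defaults (type, MTU, headroom, flags) for an
+ * ARPHRD_IP6GRE tunnel device.
+ */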
+static void ip6gre_tunnel_setup(struct net_device *dev)
+{
+	struct ip6_tnl *t;
+
+	dev->netdev_ops = &ip6gre_netdev_ops;
+	dev->destructor = ip6gre_dev_free;
+
+	dev->type = ARPHRD_IP6GRE;
+	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
+	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
+	t = netdev_priv(dev);
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu -= 8;
+	dev->flags |= IFF_NOARP;
+	dev->iflink = 0;
+	dev->addr_len = sizeof(struct in6_addr);
+	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel;
+
+	tunnel = netdev_priv(dev);
+
+	tunnel->dev = dev;
+	strcpy(tunnel->parms.name, dev->name);
+
+	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
+	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
+
+	if (ipv6_addr_any(&tunnel->parms.raddr))
+		dev->header_ops = &ip6gre_header_ops;
+
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void ip6gre_fb_tunnel_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel = netdev_priv(dev);
+
+	tunnel->dev = dev;
+	strcpy(tunnel->parms.name, dev->name);
+
+	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;
+
+	dev_hold(dev);
+}
+
+static struct inet6_protocol ip6gre_protocol __read_mostly = {
+	.handler     = ip6gre_rcv,
+	.err_handler = ip6gre_err,
+	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+};
+
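+/* Queue every tunnel in the per-namespace hash tables for unregistration. */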
+static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
+	struct list_head *head)
+{
+	int prio;
+
+	for (prio = 0; prio < 4; prio++) {
+		int h;
+		for (h = 0; h < HASH_SIZE; h++) {
+			struct ip6_tnl *t;
+
+			t = rtnl_dereference(ign->tunnels[prio][h]);
+
+			while (t != NULL) {
+				unregister_netdevice_queue(t->dev, head);
+				t = rtnl_dereference(t->next);
+			}
+		}
+	}
+}
+
+static int __net_init ip6gre_init_net(struct net *net)
+{
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	int err;
+
+	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+					   ip6gre_tunnel_setup);
+	if (!ign->fb_tunnel_dev) {
+		err = -ENOMEM;
+		goto err_alloc_dev;
+	}
+	dev_net_set(ign->fb_tunnel_dev, net);
+
+	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
+	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
+
+	err = register_netdev(ign->fb_tunnel_dev);
+	if (err)
+		goto err_reg_dev;
+
+	rcu_assign_pointer(ign->tunnels_wc[0],
+			   netdev_priv(ign->fb_tunnel_dev));
+	return 0;
+
+err_reg_dev:
+	ip6gre_dev_free(ign->fb_tunnel_dev);
+err_alloc_dev:
+	return err;
+}
+
+static void __net_exit ip6gre_exit_net(struct net *net)
+{
+	struct ip6gre_net *ign;
+	LIST_HEAD(list);
+
+	ign = net_generic(net, ip6gre_net_id);
+	rtnl_lock();
+	ip6gre_destroy_tunnels(ign, &list);
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
+static struct pernet_operations ip6gre_net_ops = {
+	.init = ip6gre_init_net,
+	.exit = ip6gre_exit_net,
+	.id   = &ip6gre_net_id,
+	.size = sizeof(struct ip6gre_net),
+};
+
+static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	__be16 flags;
+
+	if (!data)
+		return 0;
+
+	flags = 0;
+	if (data[IFLA_GRE_IFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+	if (data[IFLA_GRE_OFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+	if (flags & (GRE_VERSION|GRE_ROUTING))
+		return -EINVAL;
+
+	return 0;
+}
+
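+/* Netlink validation for ip6gretap: checks the MAC address, rejects an
+ * all-zeros IFLA_GRE_REMOTE and then applies the common GRE flag checks.
+ */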
+static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	struct in6_addr daddr;
+
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+
+	if (!data)
+		goto out;
+
+	if (data[IFLA_GRE_REMOTE]) {
+		nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+		if (ipv6_addr_any(&daddr))
+			return -EINVAL;
+	}
+
+out:
+	return ip6gre_tunnel_validate(tb, data);
+}
+
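+/* Translate IFLA_GRE_* netlink attributes into tunnel parameters. */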
+static void ip6gre_netlink_parms(struct nlattr *data[],
+				struct __ip6_tnl_parm *parms)
+{
+	memset(parms, 0, sizeof(*parms));
+
+	if (!data)
+		return;
+
+	if (data[IFLA_GRE_LINK])
+		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
+
+	if (data[IFLA_GRE_IFLAGS])
+		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+
+	if (data[IFLA_GRE_OFLAGS])
+		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+
+	if (data[IFLA_GRE_IKEY])
+		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
+
+	if (data[IFLA_GRE_OKEY])
+		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
+
+	if (data[IFLA_GRE_LOCAL])
+		nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+
+	if (data[IFLA_GRE_REMOTE])
+		nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+
+	if (data[IFLA_GRE_TTL])
+		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
+
+	if (data[IFLA_GRE_ENCAP_LIMIT])
+		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
+
+	if (data[IFLA_GRE_FLOWINFO])
+		parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+
+	if (data[IFLA_GRE_FLAGS])
+		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
+}
+
+static int ip6gre_tap_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel;
+
+	tunnel = netdev_priv(dev);
+
+	tunnel->dev = dev;
+	strcpy(tunnel->parms.name, dev->name);
+
+	ip6gre_tnl_link_config(tunnel, 1);
+
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static const struct net_device_ops ip6gre_tap_netdev_ops = {
+	.ndo_init = ip6gre_tap_init,
+	.ndo_uninit = ip6gre_tunnel_uninit,
+	.ndo_start_xmit = ip6gre_tunnel_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
+	.ndo_get_stats64 = ip6gre_get_stats64,
+};
+
+static void ip6gre_tap_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &ip6gre_tap_netdev_ops;
+	dev->destructor = ip6gre_dev_free;
+
+	dev->iflink = 0;
+	dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
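+/* rtnl_link_ops->newlink handler shared by "ip6gre" and "ip6gretap". */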
+static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+	struct nlattr *tb[], struct nlattr *data[])
+{
+	struct ip6_tnl *nt;
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	int err;
+
+	nt = netdev_priv(dev);
+	ip6gre_netlink_parms(data, &nt->parms);
+
+	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+		return -EEXIST;
+
+	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+		eth_hw_addr_random(dev);
+
+	nt->dev = dev;
+	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+	/* Can use a lockless transmit, unless we generate output sequences */
+	if (!(nt->parms.o_flags & GRE_SEQ))
+		dev->features |= NETIF_F_LLTX;
+
+	err = register_netdevice(dev);
+	if (err)
+		goto out;
+
+	dev_hold(dev);
+	ip6gre_tunnel_link(ign, nt);
+
+out:
+	return err;
+}
+
+static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[])
+{
+	struct ip6_tnl *t, *nt;
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	struct __ip6_tnl_parm p;
+
+	if (dev == ign->fb_tunnel_dev)
+		return -EINVAL;
+
+	nt = netdev_priv(dev);
+	ip6gre_netlink_parms(data, &p);
+
+	t = ip6gre_tunnel_locate(net, &p, 0);
+
+	if (t) {
+		if (t->dev != dev)
+			return -EEXIST;
+	} else {
+		t = nt;
+
+		ip6gre_tunnel_unlink(ign, t);
+		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+		ip6gre_tunnel_link(ign, t);
+		netdev_state_change(dev);
+	}
+
+	return 0;
+}
+
+static size_t ip6gre_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_GRE_LINK */
+		nla_total_size(4) +
+		/* IFLA_GRE_IFLAGS */
+		nla_total_size(2) +
+		/* IFLA_GRE_OFLAGS */
+		nla_total_size(2) +
+		/* IFLA_GRE_IKEY */
+		nla_total_size(4) +
+		/* IFLA_GRE_OKEY */
+		nla_total_size(4) +
+		/* IFLA_GRE_LOCAL */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_GRE_REMOTE */
+		nla_total_size(sizeof(struct in6_addr)) +
+		/* IFLA_GRE_TTL */
+		nla_total_size(1) +
+		/* IFLA_GRE_TOS */
+		nla_total_size(1) +
+		/* IFLA_GRE_ENCAP_LIMIT */
+		nla_total_size(1) +
+		/* IFLA_GRE_FLOWINFO */
+		nla_total_size(4) +
+		/* IFLA_GRE_FLAGS */
+		nla_total_size(4) +
+		0;
+}
+
+static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct __ip6_tnl_parm *p = &t->parms;
+
+	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+	    nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+	    nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
+	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
+	    /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
+	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
+	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
+	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
+	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
+	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
+	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
+	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
+	[IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
+	[IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
+	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
+	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
+	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+	.kind		= "ip6gre",
+	.maxtype	= IFLA_GRE_MAX,
+	.policy		= ip6gre_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= ip6gre_tunnel_setup,
+	.validate	= ip6gre_tunnel_validate,
+	.newlink	= ip6gre_newlink,
+	.changelink	= ip6gre_changelink,
+	.get_size	= ip6gre_get_size,
+	.fill_info	= ip6gre_fill_info,
+};
+
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+	.kind		= "ip6gretap",
+	.maxtype	= IFLA_GRE_MAX,
+	.policy		= ip6gre_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= ip6gre_tap_setup,
+	.validate	= ip6gre_tap_validate,
+	.newlink	= ip6gre_newlink,
+	.changelink	= ip6gre_changelink,
+	.get_size	= ip6gre_get_size,
+	.fill_info	= ip6gre_fill_info,
+};
+
+/*
+ *	And now the module code and kernel interface.
+ */
+
+static int __init ip6gre_init(void)
+{
+	int err;
+
+	pr_info("GRE over IPv6 tunneling driver\n");
+
+	err = register_pernet_device(&ip6gre_net_ops);
+	if (err < 0)
+		return err;
+
+	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
+	if (err < 0) {
+		pr_info("%s: can't add protocol\n", __func__);
+		goto add_proto_failed;
+	}
+
+	err = rtnl_link_register(&ip6gre_link_ops);
+	if (err < 0)
+		goto rtnl_link_failed;
+
+	err = rtnl_link_register(&ip6gre_tap_ops);
+	if (err < 0)
+		goto tap_ops_failed;
+
+out:
+	return err;
+
+tap_ops_failed:
+	rtnl_link_unregister(&ip6gre_link_ops);
+rtnl_link_failed:
+	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+add_proto_failed:
+	unregister_pernet_device(&ip6gre_net_ops);
+	goto out;
+}
+
+static void __exit ip6gre_fini(void)
+{
+	rtnl_link_unregister(&ip6gre_tap_ops);
+	rtnl_link_unregister(&ip6gre_link_ops);
+	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+	unregister_pernet_device(&ip6gre_net_ops);
+}
+
+module_init(ip6gre_init);
+module_exit(ip6gre_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
+MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_NETDEV("ip6gre0");
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9a1d5fe..cb7e2de 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,7 +126,7 @@
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
 	struct dst_entry *dst = t->dst_cache;
 
@@ -139,20 +139,23 @@
 
 	return dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
 
-static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
+void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
 	dst_release(t->dst_cache);
 	t->dst_cache = NULL;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *) dst;
 	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 	dst_release(t->dst_cache);
 	t->dst_cache = dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@
  **/
 
 static struct ip6_tnl __rcu **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
 {
 	const struct in6_addr *remote = &p->raddr;
 	const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@
  *   created tunnel or NULL
  **/
 
-static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
+static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 {
 	struct net_device *dev;
 	struct ip6_tnl *t;
@@ -322,7 +325,7 @@
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
-		struct ip6_tnl_parm *p, int create)
+		struct __ip6_tnl_parm *p, int create)
 {
 	const struct in6_addr *remote = &p->raddr;
 	const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@
  *   else index to encapsulation limit
  **/
 
-static __u16
-parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
 	__u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
 
 /**
  * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@
 	case ICMPV6_PARAMPROB:
 		teli = 0;
 		if ((*code) == ICMPV6_HDR_FIELD)
-			teli = parse_tlv_tnl_enc_lim(skb, skb->data);
+			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
 		if (teli && teli == *info - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@
 		IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
-static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
 			     const struct in6_addr *laddr,
 			     const struct in6_addr *raddr)
 {
-	struct ip6_tnl_parm *p = &t->parms;
+	struct __ip6_tnl_parm *p = &t->parms;
 	int ltype = ipv6_addr_type(laddr);
 	int rtype = ipv6_addr_type(raddr);
 	__u32 flags = 0;
@@ -715,13 +718,14 @@
 	}
 	return flags;
 }
+EXPORT_SYMBOL(ip6_tnl_get_cap);
 
 /* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
 				  const struct in6_addr *laddr,
 				  const struct in6_addr *raddr)
 {
-	struct ip6_tnl_parm *p = &t->parms;
+	struct __ip6_tnl_parm *p = &t->parms;
 	int ret = 0;
 	struct net *net = dev_net(t->dev);
 
@@ -740,6 +744,7 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
 /**
  * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 {
-	struct ip6_tnl_parm *p = &t->parms;
+	struct __ip6_tnl_parm *p = &t->parms;
 	int ret = 0;
 	struct net *net = dev_net(t->dev);
 
@@ -885,6 +890,8 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
+
 /**
  * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@
 	    !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
 		return -1;
 
-	offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 	if (offset > 0) {
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
-	struct ip6_tnl_parm *p = &t->parms;
+	struct __ip6_tnl_parm *p = &t->parms;
 	struct flowi6 *fl6 = &t->fl.u.ip6;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@
  **/
 
 static int
-ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
+ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
 	t->parms.laddr = p->laddr;
 	t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@
 	return 0;
 }
 
+static void
+ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
+{
+	p->laddr = u->laddr;
+	p->raddr = u->raddr;
+	p->flags = u->flags;
+	p->hop_limit = u->hop_limit;
+	p->encap_limit = u->encap_limit;
+	p->flowinfo = u->flowinfo;
+	p->link = u->link;
+	p->proto = u->proto;
+	memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
+{
+	u->laddr = p->laddr;
+	u->raddr = p->raddr;
+	u->flags = p->flags;
+	u->hop_limit = p->hop_limit;
+	u->encap_limit = p->encap_limit;
+	u->flowinfo = p->flowinfo;
+	u->link = p->link;
+	u->proto = p->proto;
+	memcpy(u->name, p->name, sizeof(u->name));
+}
+
 /**
  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
  *   @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@
 {
 	int err = 0;
 	struct ip6_tnl_parm p;
+	struct __ip6_tnl_parm p1;
 	struct ip6_tnl *t = NULL;
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,14 @@
 				err = -EFAULT;
 				break;
 			}
-			t = ip6_tnl_locate(net, &p, 0);
+			ip6_tnl_parm_from_user(&p1, &p);
+			t = ip6_tnl_locate(net, &p1, 0);
+		} else {
+			memset(&p, 0, sizeof(p));
 		}
 		if (t == NULL)
 			t = netdev_priv(dev);
-		memcpy(&p, &t->parms, sizeof (p));
+		ip6_tnl_parm_to_user(&p, &t->parms);
 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
 			err = -EFAULT;
 		}
@@ -1295,7 +1334,8 @@
 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
 		    p.proto != 0)
 			break;
-		t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
+		ip6_tnl_parm_from_user(&p1, &p);
+		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
 		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
 			if (t != NULL) {
 				if (t->dev != dev) {
@@ -1307,13 +1347,14 @@
 
 			ip6_tnl_unlink(ip6n, t);
 			synchronize_net();
-			err = ip6_tnl_change(t, &p);
+			err = ip6_tnl_change(t, &p1);
 			ip6_tnl_link(ip6n, t);
 			netdev_state_change(dev);
 		}
 		if (t) {
 			err = 0;
-			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
+			ip6_tnl_parm_to_user(&p, &t->parms);
+			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 				err = -EFAULT;
 
 		} else
@@ -1329,7 +1370,9 @@
 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
 				break;
 			err = -ENOENT;
-			if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
+			ip6_tnl_parm_from_user(&p1, &p);
+			t = ip6_tnl_locate(net, &p1, 0);
+			if (t == NULL)
 				break;
 			err = -EPERM;
 			if (t->dev == ip6n->fb_tnl_dev)
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 325e59a..beb5777 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -61,9 +61,7 @@
 	net->ipv6.ip6table_filter =
 		ip6t_register_table(net, &packet_filter, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv6.ip6table_filter))
-		return PTR_ERR(net->ipv6.ip6table_filter);
-	return 0;
+	return PTR_RET(net->ipv6.ip6table_filter);
 }
 
 static void __net_exit ip6table_filter_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 4d78240..7431121 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -97,9 +97,7 @@
 	net->ipv6.ip6table_mangle =
 		ip6t_register_table(net, &packet_mangler, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv6.ip6table_mangle))
-		return PTR_ERR(net->ipv6.ip6table_mangle);
-	return 0;
+	return PTR_RET(net->ipv6.ip6table_mangle);
 }
 
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 5b9926a..60d1bdd 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -40,9 +40,7 @@
 	net->ipv6.ip6table_raw =
 		ip6t_register_table(net, &packet_raw, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv6.ip6table_raw))
-		return PTR_ERR(net->ipv6.ip6table_raw);
-	return 0;
+	return PTR_RET(net->ipv6.ip6table_raw);
 }
 
 static void __net_exit ip6table_raw_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 91aa2b4..db15535 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -58,10 +58,7 @@
 	net->ipv6.ip6table_security =
 		ip6t_register_table(net, &security_table, repl);
 	kfree(repl);
-	if (IS_ERR(net->ipv6.ip6table_security))
-		return PTR_ERR(net->ipv6.ip6table_security);
-
-	return 0;
+	return PTR_RET(net->ipv6.ip6table_security);
 }
 
 static void __net_exit ip6table_security_net_exit(struct net *net)
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index da2e92d..745a320 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -307,10 +307,10 @@
 		goto proc_dev_snmp6_fail;
 	return 0;
 
+proc_dev_snmp6_fail:
+	proc_net_remove(net, "snmp6");
 proc_snmp6_fail:
 	proc_net_remove(net, "sockstat6");
-proc_dev_snmp6_fail:
-	proc_net_remove(net, "dev_snmp6");
 	return -ENOMEM;
 }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8e80fd2..0ddf2d1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -965,7 +965,7 @@
 {
 	int flags = 0;
 
-	fl6->flowi6_iif = net->loopback_dev->ifindex;
+	fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
 		flags |= RT6_LOOKUP_F_IFACE;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4b5b335..f99b81d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -94,6 +94,18 @@
 }
 #endif
 
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (rt->rt6i_node)
+		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
 static void tcp_v6_hash(struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
@@ -1270,6 +1282,7 @@
 
 	newsk->sk_gso_type = SKB_GSO_TCPV6;
 	__ip6_dst_store(newsk, dst, NULL, NULL);
+	inet6_sk_rx_dst_set(newsk, skb);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@ -1447,7 +1460,17 @@
 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1705,9 +1728,9 @@
 			struct dst_entry *dst = sk->sk_rx_dst;
 			struct inet_sock *icsk = inet_sk(sk);
 			if (dst)
-				dst = dst_check(dst, 0);
+				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 			if (dst &&
-			    icsk->rx_dst_ifindex == inet6_iif(skb))
+			    icsk->rx_dst_ifindex == skb->skb_iif)
 				skb_dst_set_noref(skb, dst);
 		}
 	}
@@ -1723,6 +1746,7 @@
 	.queue_xmit	   = inet6_csk_xmit,
 	.send_check	   = tcp_v6_send_check,
 	.rebuild_header	   = inet6_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct ipv6hdr),
@@ -1754,6 +1778,7 @@
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
@@ -1875,7 +1900,7 @@
 		   tp->write_seq-tp->snd_una,
 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
 		   timer_active,
-		   jiffies_to_clock_t(timer_expires - jiffies),
+		   jiffies_delta_to_clock_t(timer_expires - jiffies),
 		   icsk->icsk_retransmits,
 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 		   icsk->icsk_probes_out,
@@ -1895,10 +1920,7 @@
 	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-	int ttd = tw->tw_ttd - jiffies;
-
-	if (ttd < 0)
-		ttd = 0;
+	long delta = tw->tw_ttd - jiffies;
 
 	dest = &tw6->tw_v6_daddr;
 	src  = &tw6->tw_v6_rcv_saddr;
@@ -1914,7 +1936,7 @@
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   tw->tw_substate, 0, 0,
-		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
 		   atomic_read(&tw->tw_refcnt), tw);
 }
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef39812..f8c4c08 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -73,6 +73,13 @@
 	return 0;
 }
 
+static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
+{
+	struct rt6_info *rt = (struct rt6_info *)xdst;
+
+	rt6_init_peer(rt, net->ipv6.peers);
+}
+
 static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 			   int nfheader_len)
 {
@@ -286,6 +293,7 @@
 	.get_saddr = 		xfrm6_get_saddr,
 	.decode_session =	_decode_session6,
 	.get_tos =		xfrm6_get_tos,
+	.init_dst =		xfrm6_init_dst,
 	.init_path =		xfrm6_init_path,
 	.fill_dst =		xfrm6_fill_dst,
 	.blackhole_route =	ip6_blackhole_route,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 0481d4b..334f93b 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3024,7 +3024,7 @@
 	return res;
 }
 
-static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp, int dir)
+static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
 {
 	struct sk_buff *skb;
 	struct sadb_msg *hdr;
@@ -3105,7 +3105,7 @@
 	pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
 	pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
 	pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
-	pol->sadb_x_policy_dir = dir+1;
+	pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
 	pol->sadb_x_policy_id = xp->index;
 
 	/* Set sadb_comb's. */
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 35e1e4b..9275471 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,6 +410,7 @@
 	lsa->l2tp_family = AF_INET6;
 	lsa->l2tp_flowinfo = 0;
 	lsa->l2tp_scope_id = 0;
+	lsa->l2tp_unused = 0;
 	if (peer) {
 		if (!lsk->peer_conn_id)
 			return -ENOTCONN;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index f6fe4d4..c219000 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -969,14 +969,13 @@
 	struct sockaddr_llc sllc;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
-	int rc = 0;
+	int rc = -EBADF;
 
 	memset(&sllc, 0, sizeof(sllc));
 	lock_sock(sk);
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 	*uaddrlen = sizeof(sllc);
-	memset(uaddr, 0, *uaddrlen);
 	if (peer) {
 		rc = -ENOTCONN;
 		if (sk->sk_state != TCP_ESTABLISHED)
@@ -1206,7 +1205,7 @@
 	rc = llc_proc_init();
 	if (rc != 0) {
 		printk(llc_proc_err_msg);
-		goto out_unregister_llc_proto;
+		goto out_station;
 	}
 	rc = llc_sysctl_init();
 	if (rc) {
@@ -1226,7 +1225,8 @@
 	llc_sysctl_exit();
 out_proc:
 	llc_proc_exit();
-out_unregister_llc_proto:
+out_station:
+	llc_station_exit();
 	proto_unregister(&llc_proto);
 	goto out;
 }
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index e32cab4..dd3e833 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -42,6 +42,7 @@
 void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
 					    struct sk_buff *skb))
 {
+	smp_wmb(); /* ensure initialisation is complete before it's called */
 	if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
 		llc_type_handlers[type - 1] = handler;
 }
@@ -50,11 +51,19 @@
 {
 	if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
 		llc_type_handlers[type - 1] = NULL;
+	synchronize_net();
 }
 
 void llc_set_station_handler(void (*handler)(struct sk_buff *skb))
 {
+	/* Ensure initialisation is complete before it's called */
+	if (handler)
+		smp_wmb();
+
 	llc_station_handler = handler;
+
+	if (!handler)
+		synchronize_net();
 }
 
 /**
@@ -150,6 +159,8 @@
 	int dest;
 	int (*rcv)(struct sk_buff *, struct net_device *,
 		   struct packet_type *, struct net_device *);
+	void (*sta_handler)(struct sk_buff *skb);
+	void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
 
 	if (!net_eq(dev_net(dev), &init_net))
 		goto drop;
@@ -182,7 +193,8 @@
 	 */
 	rcv = rcu_dereference(sap->rcv_func);
 	dest = llc_pdu_type(skb);
-	if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
+	sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
+	if (unlikely(!sap_handler)) {
 		if (rcv)
 			rcv(skb, dev, pt, orig_dev);
 		else
@@ -193,7 +205,7 @@
 			if (cskb)
 				rcv(cskb, dev, pt, orig_dev);
 		}
-		llc_type_handlers[dest - 1](sap, skb);
+		sap_handler(sap, skb);
 	}
 	llc_sap_put(sap);
 out:
@@ -202,9 +214,10 @@
 	kfree_skb(skb);
 	goto out;
 handle_station:
-	if (!llc_station_handler)
+	sta_handler = ACCESS_ONCE(llc_station_handler);
+	if (!sta_handler)
 		goto drop;
-	llc_station_handler(skb);
+	sta_handler(skb);
 	goto out;
 }
 
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d89..b2f2bac 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -293,7 +293,7 @@
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -322,7 +322,7 @@
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -687,12 +687,8 @@
 	llc_station_state_process(skb);
 }
 
-int __init llc_station_init(void)
+void __init llc_station_init(void)
 {
-	int rc = -ENOBUFS;
-	struct sk_buff *skb;
-	struct llc_station_state_ev *ev;
-
 	skb_queue_head_init(&llc_main_station.mac_pdu_q);
 	skb_queue_head_init(&llc_main_station.ev_q.list);
 	spin_lock_init(&llc_main_station.ev_q.lock);
@@ -700,23 +696,12 @@
 			(unsigned long)&llc_main_station);
 	llc_main_station.ack_timer.expires  = jiffies +
 						sysctl_llc_station_ack_timeout;
-	skb = alloc_skb(0, GFP_ATOMIC);
-	if (!skb)
-		goto out;
-	rc = 0;
-	llc_set_station_handler(llc_station_rcv);
-	ev = llc_station_ev(skb);
-	memset(ev, 0, sizeof(*ev));
 	llc_main_station.maximum_retry	= 1;
-	llc_main_station.state		= LLC_STATION_STATE_DOWN;
-	ev->type	= LLC_STATION_EV_TYPE_SIMPLE;
-	ev->prim_type	= LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK;
-	rc = llc_station_next_state(skb);
-out:
-	return rc;
+	llc_main_station.state		= LLC_STATION_STATE_UP;
+	llc_set_station_handler(llc_station_rcv);
 }
 
-void __exit llc_station_exit(void)
+void llc_station_exit(void)
 {
 	llc_set_station_handler(NULL);
 }
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 8dfd70d..a04752e 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -38,14 +38,10 @@
 static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
 				const u8 *addr[], const size_t *len, u8 *mac)
 {
-	u8 scratch[2 * AES_BLOCK_SIZE];
-	u8 *cbc, *pad;
+	u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
 	const u8 *pos, *end;
 	size_t i, e, left, total_len;
 
-	cbc = scratch;
-	pad = scratch + AES_BLOCK_SIZE;
-
 	memset(cbc, 0, AES_BLOCK_SIZE);
 
 	total_len = 0;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d41974a..929f897 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -102,6 +102,18 @@
 	return 0;
 }
 
+static int ieee80211_start_p2p_device(struct wiphy *wiphy,
+				      struct wireless_dev *wdev)
+{
+	return ieee80211_do_open(wdev, true);
+}
+
+static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
+				      struct wireless_dev *wdev)
+{
+	ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
+}
+
 static int ieee80211_set_noack_map(struct wiphy *wiphy,
 				  struct net_device *dev,
 				  u16 noack_map)
@@ -330,7 +342,7 @@
 	if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
 		struct ieee80211_supported_band *sband;
 		sband = sta->local->hw.wiphy->bands[
-				sta->local->hw.conf.channel->band];
+				sta->local->oper_channel->band];
 		rate->legacy = sband->bitrates[idx].bitrate;
 	} else
 		rate->mcs = idx;
@@ -725,25 +737,23 @@
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
 				    const u8 *resp, size_t resp_len)
 {
-	struct sk_buff *new, *old;
+	struct probe_resp *new, *old;
 
 	if (!resp || !resp_len)
 		return 1;
 
 	old = rtnl_dereference(sdata->u.ap.probe_resp);
 
-	new = dev_alloc_skb(resp_len);
+	new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
-	memcpy(skb_put(new, resp_len), resp, resp_len);
+	new->len = resp_len;
+	memcpy(new->data, resp, resp_len);
 
 	rcu_assign_pointer(sdata->u.ap.probe_resp, new);
-	if (old) {
-		/* TODO: use call_rcu() */
-		synchronize_rcu();
-		dev_kfree_skb(old);
-	}
+	if (old)
+		kfree_rcu(old, rcu_head);
 
 	return 0;
 }
@@ -950,7 +960,7 @@
 	/* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
 	 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
 
-	memset(msg->da, 0xff, ETH_ALEN);
+	eth_broadcast_addr(msg->da);
 	memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
 	msg->len = htons(6);
 	msg->dsap = 0;
@@ -1285,9 +1295,10 @@
 	mutex_unlock(&local->sta_mtx);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
 		ieee80211_recalc_ps(local, -1);
-
+		ieee80211_recalc_ps_vif(sdata);
+	}
 	return 0;
 }
 
@@ -1661,7 +1672,7 @@
 	}
 
 	if (!sdata->vif.bss_conf.use_short_slot &&
-	    sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+	    sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
 		sdata->vif.bss_conf.use_short_slot = true;
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}
@@ -1775,6 +1786,7 @@
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	case NL80211_IFTYPE_P2P_GO:
 		if (sdata->local->ops->hw_scan)
@@ -1927,7 +1939,7 @@
 				  enum nl80211_tx_power_setting type, int mbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_channel *chan = local->hw.conf.channel;
+	struct ieee80211_channel *chan = local->oper_channel;
 	u32 changes = 0;
 
 	switch (type) {
@@ -2079,6 +2091,7 @@
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
 	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps_vif(sdata);
 
 	return 0;
 }
@@ -2461,6 +2474,9 @@
 		if (!sdata->u.mgd.associated)
 			need_offchan = true;
 		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		need_offchan = true;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2653,6 +2669,7 @@
 			       u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tdls_data *tf;
 
 	tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2672,8 +2689,10 @@
 		tf->u.setup_req.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -2686,8 +2705,10 @@
 		tf->u.setup_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -2725,6 +2746,7 @@
 			   u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_mgmt *mgmt;
 
 	mgmt = (void *)skb_put(skb, 24);
@@ -2747,8 +2769,10 @@
 		mgmt->u.action.u.tdls_discover_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	default:
@@ -3005,6 +3029,8 @@
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
 	.change_virtual_intf = ieee80211_change_iface,
+	.start_p2p_device = ieee80211_start_p2p_device,
+	.stop_p2p_device = ieee80211_stop_p2p_device,
 	.add_key = ieee80211_add_key,
 	.del_key = ieee80211_del_key,
 	.get_key = ieee80211_get_key,
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b8dfb44..97173f8 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -63,8 +63,6 @@
 		      local->user_power_level);
 DEBUGFS_READONLY_FILE(power, "%d",
 		      local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
-		      local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
 		      local->total_ps_buffered);
 DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -91,33 +89,6 @@
 	.llseek = noop_llseek,
 };
 
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
-		       size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	const char *buf;
-
-	switch (local->hw.conf.channel_type) {
-	case NL80211_CHAN_NO_HT:
-		buf = "no ht\n";
-		break;
-	case NL80211_CHAN_HT20:
-		buf = "ht20\n";
-		break;
-	case NL80211_CHAN_HT40MINUS:
-		buf = "ht40-\n";
-		break;
-	case NL80211_CHAN_HT40PLUS:
-		buf = "ht40+\n";
-		break;
-	default:
-		buf = "???";
-		break;
-	}
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 			    size_t count, loff_t *ppos)
 {
@@ -205,7 +176,6 @@
 }
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
 DEBUGFS_READONLY_FILE_OPS(queues);
 
 /* statistics stuff */
@@ -272,12 +242,10 @@
 
 	local->debugfs.keys = debugfs_create_dir("keys", phyd);
 
-	DEBUGFS_ADD(frequency);
 	DEBUGFS_ADD(total_ps_buffered);
 	DEBUGFS_ADD(wep_iv);
 	DEBUGFS_ADD(queues);
 	DEBUGFS_ADD_MODE(reset, 0200);
-	DEBUGFS_ADD(channel_type);
 	DEBUGFS_ADD(hwflags);
 	DEBUGFS_ADD(user_power);
 	DEBUGFS_ADD(power);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index df92031..da9003b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -9,7 +9,7 @@
 {
 	WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
 	     "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
-	     sdata->dev->name, sdata->flags);
+	     sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@
 	return sdata;
 }
 
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+			  struct ieee80211_tx_control *control,
+			  struct sk_buff *skb)
 {
-	local->ops->tx(&local->hw, skb);
+	local->ops->tx(&local->hw, control, skb);
 }
 
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@
 	sdata = get_bss_sdata(sdata);
 	check_sdata_in_driver(sdata);
 
+	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+		sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
 	trace_drv_sta_rc_update(local, sdata, sta, changed);
 	if (local->ops->sta_rc_update)
 		local->ops->sta_rc_update(&local->hw, &sdata->vif,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 5746d62..a9d9328 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -109,7 +109,7 @@
 	memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_PROBE_RESP);
-	memset(mgmt->da, 0xff, ETH_ALEN);
+	eth_broadcast_addr(mgmt->da);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
 	mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@
 	mod_timer(&ifibss->timer,
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-	bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+	bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
 					mgmt, skb->len, 0, GFP_KERNEL);
 	cfg80211_put_bss(bss);
 	netif_carrier_on(sdata->dev);
@@ -294,7 +294,7 @@
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->hw.conf.channel->band;
+	int band = local->oper_channel->band;
 
 	/*
 	 * XXX: Consider removing the least recently used entry and
@@ -459,8 +459,11 @@
 			}
 		}
 
-		if (sta && rates_updated)
+		if (sta && rates_updated) {
+			drv_sta_rc_update(local, sdata, &sta->sta,
+					  IEEE80211_RC_SUPP_RATES_CHANGED);
 			rate_control_rate_init(sta);
+		}
 
 		rcu_read_unlock();
 	}
@@ -561,7 +564,7 @@
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->hw.conf.channel->band;
+	int band = local->oper_channel->band;
 
 	/*
 	 * XXX: Consider removing the least recently used entry and
@@ -759,7 +762,7 @@
 				return;
 			}
 			sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-				   local->hw.conf.channel->center_freq);
+				   local->oper_channel->center_freq);
 
 			/* No IBSS found - decrease scan interval and continue
 			 * scanning. */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bb61f77..204bfed 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -193,8 +193,6 @@
 	struct sta_info *sta;
 	struct ieee80211_key *key;
 
-	struct ieee80211_channel *channel;
-
 	unsigned int flags;
 };
 
@@ -274,9 +272,15 @@
 	struct rcu_head rcu_head;
 };
 
+struct probe_resp {
+	struct rcu_head rcu_head;
+	int len;
+	u8 data[0];
+};
+
 struct ieee80211_if_ap {
 	struct beacon_data __rcu *beacon;
-	struct sk_buff __rcu *probe_resp;
+	struct probe_resp __rcu *probe_resp;
 
 	struct list_head vlans;
 
@@ -359,6 +363,7 @@
 	IEEE80211_STA_NULLFUNC_ACKED	= BIT(8),
 	IEEE80211_STA_RESET_SIGNAL_AVE	= BIT(9),
 	IEEE80211_STA_DISABLE_40MHZ	= BIT(10),
+	IEEE80211_STA_DISABLE_VHT	= BIT(11),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -1075,6 +1080,8 @@
 	struct idr ack_status_frames;
 	spinlock_t ack_status_lock;
 
+	struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
 	/* dummy netdev for use w/ NAPI */
 	struct net_device napi_dev;
 
@@ -1131,7 +1138,7 @@
 	u8 *prep;
 	u8 *perr;
 	struct ieee80211_rann_ie *rann;
-	u8 *ch_switch_elem;
+	struct ieee80211_channel_sw_ie *ch_switch_ie;
 	u8 *country_elem;
 	u8 *pwr_constr_elem;
 	u8 *quiet_elem;	/* first quite element */
@@ -1157,7 +1164,6 @@
 	u8 preq_len;
 	u8 prep_len;
 	u8 perr_len;
-	u8 ch_switch_elem_len;
 	u8 country_elem_len;
 	u8 pwr_constr_elem_len;
 	u8 quiet_elem_len;
@@ -1202,6 +1208,7 @@
 void ieee80211_send_pspoll(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
 				  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1298,8 @@
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
 				    const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1425,7 +1434,6 @@
 			     struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
 				     enum queue_stop_reason reason);
@@ -1457,13 +1465,15 @@
 			     u8 channel);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 					  u8 *dst, u32 ratemask,
+					  struct ieee80211_channel *chan,
 					  const u8 *ssid, size_t ssid_len,
 					  const u8 *ie, size_t ie_len,
 					  bool directed);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
 			      const u8 *ssid, size_t ssid_len,
 			      const u8 *ie, size_t ie_len,
-			      u32 ratemask, bool directed, bool no_cck);
+			      u32 ratemask, bool directed, bool no_cck,
+			      struct ieee80211_channel *channel);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
 				  const size_t supp_rates_len,
@@ -1487,9 +1497,11 @@
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
 			       u32 cap);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-			    struct sk_buff *skb, bool need_basic);
+			    struct sk_buff *skb, bool need_basic,
+			    enum ieee80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-				struct sk_buff *skb, bool need_basic);
+				struct sk_buff *skb, bool need_basic,
+				enum ieee80211_band band);
 
 /* channel management */
 enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bfb57dc..59f8adc 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -100,6 +100,10 @@
 			sdata->vif.bss_conf.idle = true;
 			continue;
 		}
+
+		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+			continue;
+
 		/* count everything else */
 		sdata->vif.bss_conf.idle = false;
 		count++;
@@ -121,7 +125,8 @@
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
 			continue;
 		if (sdata->old_idle == sdata->vif.bss_conf.idle)
 			continue;
@@ -204,6 +209,8 @@
 {
 	return type1 == NL80211_IFTYPE_MONITOR ||
 		type2 == NL80211_IFTYPE_MONITOR ||
+		type1 == NL80211_IFTYPE_P2P_DEVICE ||
+		type2 == NL80211_IFTYPE_P2P_DEVICE ||
 		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
 		(type1 == NL80211_IFTYPE_WDS &&
 			(type2 == NL80211_IFTYPE_WDS ||
@@ -406,9 +413,10 @@
  * an error on interface type changes that have been pre-checked, so most
  * checks should be in ieee80211_check_concurrent_iface.
  */
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct net_device *dev = wdev->netdev;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	u32 changed = 0;
@@ -443,6 +451,7 @@
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		/* no special treatment */
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +480,7 @@
 	 * Copy the hopefully now-present MAC address to
 	 * this interface, if it has the special null one.
 	 */
-	if (is_zero_ether_addr(dev->dev_addr)) {
+	if (dev && is_zero_ether_addr(dev->dev_addr)) {
 		memcpy(dev->dev_addr,
 		       local->hw.wiphy->perm_addr,
 		       ETH_ALEN);
@@ -536,15 +545,23 @@
 			local->fif_probe_req++;
 		}
 
-		changed |= ieee80211_reset_erp_info(sdata);
+		if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+			changed |= ieee80211_reset_erp_info(sdata);
 		ieee80211_bss_info_change_notify(sdata, changed);
 
-		if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-		    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-		    sdata->vif.type == NL80211_IFTYPE_AP)
+		switch (sdata->vif.type) {
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_ADHOC:
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_MESH_POINT:
 			netif_carrier_off(dev);
-		else
+			break;
+		case NL80211_IFTYPE_WDS:
+		case NL80211_IFTYPE_P2P_DEVICE:
+			break;
+		default:
 			netif_carrier_on(dev);
+		}
 
 		/*
 		 * set default queue parameters so drivers don't
@@ -576,6 +593,9 @@
 		}
 
 		rate_control_rate_init(sta);
+		netif_carrier_on(dev);
+	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+		rcu_assign_pointer(local->p2p_sdata, sdata);
 	}
 
 	/*
@@ -601,7 +621,8 @@
 
 	ieee80211_recalc_ps(local, -1);
 
-	netif_tx_start_all_queues(dev);
+	if (dev)
+		netif_tx_start_all_queues(dev);
 
 	return 0;
  err_del_interface:
@@ -631,7 +652,7 @@
 	if (err)
 		return err;
 
-	return ieee80211_do_open(dev, true);
+	return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +673,8 @@
 	/*
 	 * Stop TX on this interface first.
 	 */
-	netif_tx_stop_all_queues(sdata->dev);
+	if (sdata->dev)
+		netif_tx_stop_all_queues(sdata->dev);
 
 	ieee80211_roc_purge(sdata);
 
@@ -691,14 +713,16 @@
 		local->fif_probe_req--;
 	}
 
-	netif_addr_lock_bh(sdata->dev);
-	spin_lock_bh(&local->filter_lock);
-	__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
-			 sdata->dev->addr_len);
-	spin_unlock_bh(&local->filter_lock);
-	netif_addr_unlock_bh(sdata->dev);
+	if (sdata->dev) {
+		netif_addr_lock_bh(sdata->dev);
+		spin_lock_bh(&local->filter_lock);
+		__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+				 sdata->dev->addr_len);
+		spin_unlock_bh(&local->filter_lock);
+		netif_addr_unlock_bh(sdata->dev);
 
-	ieee80211_configure_filter(local);
+		ieee80211_configure_filter(local);
+	}
 
 	del_timer_sync(&local->dynamic_ps_timer);
 	cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +732,7 @@
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
 		struct beacon_data *old_beacon =
 			rtnl_dereference(sdata->u.ap.beacon);
-		struct sk_buff *old_probe_resp =
+		struct probe_resp *old_probe_resp =
 			rtnl_dereference(sdata->u.ap.probe_resp);
 
 		/* sdata_running will return false, so this will disable */
@@ -720,7 +744,7 @@
 		RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
 		synchronize_rcu();
 		kfree(old_beacon);
-		kfree_skb(old_probe_resp);
+		kfree(old_probe_resp);
 
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,6 +783,10 @@
 		ieee80211_adjust_monitor_flags(sdata, -1);
 		ieee80211_configure_filter(local);
 		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/* relies on synchronize_rcu() below */
+		rcu_assign_pointer(local->p2p_sdata, NULL);
+		/* fall through */
 	default:
 		flush_work(&sdata->work);
 		/*
@@ -771,14 +799,6 @@
 		skb_queue_purge(&sdata->skb_queue);
 
 		/*
-		 * Disable beaconing here for mesh only, AP and IBSS
-		 * are already taken care of.
-		 */
-		if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-			ieee80211_bss_info_change_notify(sdata,
-				BSS_CHANGED_BEACON_ENABLED);
-
-		/*
 		 * Free all remaining keys, there shouldn't be any,
 		 * except maybe group keys in AP more or WDS?
 		 */
@@ -877,9 +897,8 @@
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
  */
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	int flushed;
 	int i;
@@ -900,6 +919,11 @@
 	WARN_ON(flushed);
 }
 
+static void ieee80211_uninit(struct net_device *dev)
+{
+	ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb)
 {
@@ -909,7 +933,7 @@
 static const struct net_device_ops ieee80211_dataif_ops = {
 	.ndo_open		= ieee80211_open,
 	.ndo_stop		= ieee80211_stop,
-	.ndo_uninit		= ieee80211_teardown_sdata,
+	.ndo_uninit		= ieee80211_uninit,
 	.ndo_start_xmit		= ieee80211_subif_start_xmit,
 	.ndo_set_rx_mode	= ieee80211_set_multicast_list,
 	.ndo_change_mtu 	= ieee80211_change_mtu,
@@ -940,7 +964,7 @@
 static const struct net_device_ops ieee80211_monitorif_ops = {
 	.ndo_open		= ieee80211_open,
 	.ndo_stop		= ieee80211_stop,
-	.ndo_uninit		= ieee80211_teardown_sdata,
+	.ndo_uninit		= ieee80211_uninit,
 	.ndo_start_xmit		= ieee80211_monitor_start_xmit,
 	.ndo_set_rx_mode	= ieee80211_set_multicast_list,
 	.ndo_change_mtu 	= ieee80211_change_mtu,
@@ -1099,7 +1123,6 @@
 	/* and set some type-dependent values */
 	sdata->vif.type = type;
 	sdata->vif.p2p = false;
-	sdata->dev->netdev_ops = &ieee80211_dataif_ops;
 	sdata->wdev.iftype = type;
 
 	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1130,11 @@
 
 	sdata->noack_map = 0;
 
-	/* only monitor differs */
-	sdata->dev->type = ARPHRD_ETHER;
+	/* only monitor/p2p-device differ */
+	if (sdata->dev) {
+		sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+		sdata->dev->type = ARPHRD_ETHER;
+	}
 
 	skb_queue_head_init(&sdata->skb_queue);
 	INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1172,7 @@
 		break;
 	case NL80211_IFTYPE_WDS:
 	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
@@ -1156,18 +1183,6 @@
 	ieee80211_debugfs_add_netdev(sdata);
 }
 
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
-	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_MESH_POINT:
-		mesh_path_flush_by_iface(sdata);
-		break;
-
-	default:
-		break;
-	}
-}
-
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 					   enum nl80211_iftype type)
 {
@@ -1225,7 +1240,7 @@
 
 	ieee80211_do_stop(sdata, false);
 
-	ieee80211_teardown_sdata(sdata->dev);
+	ieee80211_teardown_sdata(sdata);
 
 	ret = drv_change_interface(local, sdata, internal_type, p2p);
 	if (ret)
@@ -1240,7 +1255,7 @@
 
 	ieee80211_setup_sdata(sdata, type);
 
-	err = ieee80211_do_open(sdata->dev, false);
+	err = ieee80211_do_open(&sdata->wdev, false);
 	WARN(err, "type change: do_open returned %d", err);
 
 	return ret;
@@ -1267,14 +1282,14 @@
 			return ret;
 	} else {
 		/* Purge and reset type-dependent state. */
-		ieee80211_teardown_sdata(sdata->dev);
+		ieee80211_teardown_sdata(sdata);
 		ieee80211_setup_sdata(sdata, type);
 	}
 
 	/* reset some values that shouldn't be kept across type changes */
 	sdata->vif.bss_conf.basic_rates =
 		ieee80211_mandatory_rates(sdata->local,
-			sdata->local->hw.conf.channel->band);
+			sdata->local->oper_channel->band);
 	sdata->drop_unencrypted = 0;
 	if (type == NL80211_IFTYPE_STATION)
 		sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1298,7 @@
 }
 
 static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
-				       struct net_device *dev,
-				       enum nl80211_iftype type)
+				       u8 *perm_addr, enum nl80211_iftype type)
 {
 	struct ieee80211_sub_if_data *sdata;
 	u64 mask, start, addr, val, inc;
@@ -1293,13 +1307,12 @@
 	int i;
 
 	/* default ... something at least */
-	memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+	memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
 	if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
 	    local->hw.wiphy->n_addresses <= 1)
 		return;
 
-
 	mutex_lock(&local->iflist_mtx);
 
 	switch (type) {
@@ -1312,11 +1325,24 @@
 		list_for_each_entry(sdata, &local->interfaces, list) {
 			if (sdata->vif.type != NL80211_IFTYPE_AP)
 				continue;
-			memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+			memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
 			break;
 		}
 		/* keep default if no AP interface present */
 		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+		if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+			list_for_each_entry(sdata, &local->interfaces, list) {
+				if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+					continue;
+				if (!ieee80211_sdata_running(sdata))
+					continue;
+				memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+				goto out_unlock;
+			}
+		}
+		/* otherwise fall through */
 	default:
 		/* assign a new address if possible -- try n_addresses first */
 		for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1357,7 @@
 			}
 
 			if (!used) {
-				memcpy(dev->perm_addr,
+				memcpy(perm_addr,
 				       local->hw.wiphy->addresses[i].addr,
 				       ETH_ALEN);
 				break;
@@ -1382,7 +1408,7 @@
 			}
 
 			if (!used) {
-				memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+				memcpy(perm_addr, tmp_addr, ETH_ALEN);
 				break;
 			}
 			addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1417,7 @@
 		break;
 	}
 
+ out_unlock:
 	mutex_unlock(&local->iflist_mtx);
 }
 
@@ -1398,49 +1425,68 @@
 		     struct wireless_dev **new_wdev, enum nl80211_iftype type,
 		     struct vif_params *params)
 {
-	struct net_device *ndev;
+	struct net_device *ndev = NULL;
 	struct ieee80211_sub_if_data *sdata = NULL;
 	int ret, i;
 	int txqs = 1;
 
 	ASSERT_RTNL();
 
-	if (local->hw.queues >= IEEE80211_NUM_ACS)
-		txqs = IEEE80211_NUM_ACS;
+	if (type == NL80211_IFTYPE_P2P_DEVICE) {
+		struct wireless_dev *wdev;
 
-	ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-				name, ieee80211_if_setup, txqs, 1);
-	if (!ndev)
-		return -ENOMEM;
-	dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+		sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+				GFP_KERNEL);
+		if (!sdata)
+			return -ENOMEM;
+		wdev = &sdata->wdev;
 
-	ndev->needed_headroom = local->tx_headroom +
-				4*6 /* four MAC addresses */
-				+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
-				+ 6 /* mesh */
-				+ 8 /* rfc1042/bridge tunnel */
-				- ETH_HLEN /* ethernet hard_header_len */
-				+ IEEE80211_ENCRYPT_HEADROOM;
-	ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+		sdata->dev = NULL;
+		strlcpy(sdata->name, name, IFNAMSIZ);
+		ieee80211_assign_perm_addr(local, wdev->address, type);
+		memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+	} else {
+		if (local->hw.queues >= IEEE80211_NUM_ACS)
+			txqs = IEEE80211_NUM_ACS;
 
-	ret = dev_alloc_name(ndev, ndev->name);
-	if (ret < 0)
-		goto fail;
+		ndev = alloc_netdev_mqs(sizeof(*sdata) +
+					local->hw.vif_data_size,
+					name, ieee80211_if_setup, txqs, 1);
+		if (!ndev)
+			return -ENOMEM;
+		dev_net_set(ndev, wiphy_net(local->hw.wiphy));
 
-	ieee80211_assign_perm_addr(local, ndev, type);
-	memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
-	SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+		ndev->needed_headroom = local->tx_headroom +
+					4*6 /* four MAC addresses */
+					+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+					+ 6 /* mesh */
+					+ 8 /* rfc1042/bridge tunnel */
+					- ETH_HLEN /* ethernet hard_header_len */
+					+ IEEE80211_ENCRYPT_HEADROOM;
+		ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
 
-	/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
-	sdata = netdev_priv(ndev);
-	ndev->ieee80211_ptr = &sdata->wdev;
-	memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
-	memcpy(sdata->name, ndev->name, IFNAMSIZ);
+		ret = dev_alloc_name(ndev, ndev->name);
+		if (ret < 0) {
+			free_netdev(ndev);
+			return ret;
+		}
+
+		ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+		memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+		SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+		/* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+		sdata = netdev_priv(ndev);
+		ndev->ieee80211_ptr = &sdata->wdev;
+		memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+		memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+		sdata->dev = ndev;
+	}
 
 	/* initialise type-independent data */
 	sdata->wdev.wiphy = local->hw.wiphy;
 	sdata->local = local;
-	sdata->dev = ndev;
 #ifdef CONFIG_INET
 	sdata->arp_filter_state = true;
 #endif
@@ -1469,18 +1515,22 @@
 	/* setup type-dependent data */
 	ieee80211_setup_sdata(sdata, type);
 
-	if (params) {
-		ndev->ieee80211_ptr->use_4addr = params->use_4addr;
-		if (type == NL80211_IFTYPE_STATION)
-			sdata->u.mgd.use_4addr = params->use_4addr;
+	if (ndev) {
+		if (params) {
+			ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+			if (type == NL80211_IFTYPE_STATION)
+				sdata->u.mgd.use_4addr = params->use_4addr;
+		}
+
+		ndev->features |= local->hw.netdev_features;
+
+		ret = register_netdevice(ndev);
+		if (ret) {
+			free_netdev(ndev);
+			return ret;
+		}
 	}
 
-	ndev->features |= local->hw.netdev_features;
-
-	ret = register_netdevice(ndev);
-	if (ret)
-		goto fail;
-
 	mutex_lock(&local->iflist_mtx);
 	list_add_tail_rcu(&sdata->list, &local->interfaces);
 	mutex_unlock(&local->iflist_mtx);
@@ -1489,10 +1539,6 @@
 		*new_wdev = &sdata->wdev;
 
 	return 0;
-
- fail:
-	free_netdev(ndev);
-	return ret;
 }
 
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1549,22 @@
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
-	/* clean up type-dependent data */
-	ieee80211_clean_sdata(sdata);
-
 	synchronize_rcu();
-	unregister_netdevice(sdata->dev);
+
+	if (sdata->dev) {
+		unregister_netdevice(sdata->dev);
+	} else {
+		cfg80211_unregister_wdev(&sdata->wdev);
+		kfree(sdata);
+	}
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+	if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+		return;
+	ieee80211_do_stop(sdata, true);
+	ieee80211_teardown_sdata(sdata);
 }
 
 /*
@@ -1518,6 +1575,7 @@
 {
 	struct ieee80211_sub_if_data *sdata, *tmp;
 	LIST_HEAD(unreg_list);
+	LIST_HEAD(wdev_list);
 
 	ASSERT_RTNL();
 
@@ -1525,13 +1583,20 @@
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
 
-		ieee80211_clean_sdata(sdata);
-
-		unregister_netdevice_queue(sdata->dev, &unreg_list);
+		if (sdata->dev)
+			unregister_netdevice_queue(sdata->dev, &unreg_list);
+		else
+			list_add(&sdata->list, &wdev_list);
 	}
 	mutex_unlock(&local->iflist_mtx);
 	unregister_netdevice_many(&unreg_list);
 	list_del(&unreg_list);
+
+	list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+		list_del(&sdata->list);
+		cfg80211_unregister_wdev(&sdata->wdev);
+		kfree(sdata);
+	}
 }
 
 static int netdev_notify(struct notifier_block *nb,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c26e231..bd75293 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -207,6 +207,10 @@
 		sdata->vif.bss_conf.bssid = NULL;
 	else if (ieee80211_vif_is_mesh(&sdata->vif)) {
 		sdata->vif.bss_conf.bssid = zero;
+	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+		sdata->vif.bss_conf.bssid = sdata->vif.addr;
+		WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+			  "P2P Device BSS changed %#x", changed);
 	} else {
 		WARN_ON(1);
 		return;
@@ -514,6 +518,11 @@
 			BIT(IEEE80211_STYPE_AUTH >> 4) |
 			BIT(IEEE80211_STYPE_DEAUTH >> 4),
 	},
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+			BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+	},
 };
 
 static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +545,11 @@
 	int priv_size, i;
 	struct wiphy *wiphy;
 
+	if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
+		    !ops->add_interface || !ops->remove_interface ||
+		    !ops->configure_filter))
+		return NULL;
+
 	if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
 		return NULL;
 
@@ -588,13 +602,6 @@
 
 	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-	BUG_ON(!ops->tx);
-	BUG_ON(!ops->start);
-	BUG_ON(!ops->stop);
-	BUG_ON(!ops->config);
-	BUG_ON(!ops->add_interface);
-	BUG_ON(!ops->remove_interface);
-	BUG_ON(!ops->configure_filter);
 	local->ops = ops;
 
 	/* set up some defaults */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c..ff0296c 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,11 +109,11 @@
 
 	/* Disallow HT40+/- mismatch */
 	if (ie->ht_operation &&
-	    (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
-	    local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+	    (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+	     sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
 	    (sta_channel_type == NL80211_CHAN_HT40MINUS ||
 	     sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-	    local->_oper_channel_type != sta_channel_type)
+	    sdata->vif.bss_conf.channel_type != sta_channel_type)
 		goto mismatch;
 
 	return true;
@@ -136,10 +136,13 @@
  * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
  *
  * @sdata: mesh interface in which mesh beacons are going to be updated
+ *
+ * Returns: beacon changed flag if the beacon content changed.
  */
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 {
 	bool free_plinks;
+	u32 changed = 0;
 
 	/* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
 	 * the mesh interface might be able to establish plinks with peers that
@@ -149,8 +152,12 @@
 	 */
 	free_plinks = mesh_plink_availables(sdata);
 
-	if (free_plinks != sdata->u.mesh.accepting_plinks)
-		ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
+	if (free_plinks != sdata->u.mesh.accepting_plinks) {
+		sdata->u.mesh.accepting_plinks = free_plinks;
+		changed = BSS_CHANGED_BEACON;
+	}
+
+	return changed;
 }
 
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -262,7 +269,6 @@
 	neighbors = (neighbors > 15) ? 15 : neighbors;
 	*pos++ = neighbors << 1;
 	/* Mesh capability */
-	ifmsh->accepting_plinks = mesh_plink_availables(sdata);
 	*pos = MESHCONF_CAPAB_FORWARDING;
 	*pos |= ifmsh->accepting_plinks ?
 	    MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
@@ -349,17 +355,18 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *chan = local->oper_channel;
 	u8 *pos;
 
 	if (skb_tailroom(skb) < 3)
 		return -ENOMEM;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[chan->band];
 	if (sband->band == IEEE80211_BAND_2GHZ) {
 		pos = skb_put(skb, 2 + 1);
 		*pos++ = WLAN_EID_DS_PARAMS;
 		*pos++ = 1;
-		*pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+		*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
 	}
 
 	return 0;
@@ -374,7 +381,7 @@
 
 	sband = local->hw.wiphy->bands[local->oper_channel->band];
 	if (!sband->ht_cap.ht_supported ||
-	    local->_oper_channel_type == NL80211_CHAN_NO_HT)
+	    sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
 		return 0;
 
 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -391,7 +398,8 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_channel *channel = local->oper_channel;
-	enum nl80211_channel_type channel_type = local->_oper_channel_type;
+	enum nl80211_channel_type channel_type =
+				sdata->vif.bss_conf.channel_type;
 	struct ieee80211_supported_band *sband =
 				local->hw.wiphy->bands[channel->band];
 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -521,14 +529,13 @@
 static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
 			   struct ieee80211_if_mesh *ifmsh)
 {
-	bool free_plinks;
+	u32 changed;
 
 	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
 	mesh_path_expire(sdata);
 
-	free_plinks = mesh_plink_availables(sdata);
-	if (free_plinks != sdata->u.mesh.accepting_plinks)
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+	changed = mesh_accept_plinks_update(sdata);
+	ieee80211_bss_info_change_notify(sdata, changed);
 
 	mod_timer(&ifmsh->housekeeping_timer,
 		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -603,12 +610,14 @@
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	sdata->vif.bss_conf.basic_rates =
 		ieee80211_mandatory_rates(sdata->local,
-					  sdata->local->hw.conf.channel->band);
+					  sdata->local->oper_channel->band);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
 						BSS_CHANGED_BEACON_ENABLED |
 						BSS_CHANGED_HT |
 						BSS_CHANGED_BASIC_RATES |
 						BSS_CHANGED_BEACON_INT);
+
+	netif_carrier_on(sdata->dev);
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -616,12 +625,19 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+	netif_carrier_off(sdata->dev);
+
+	/* stop the beacon */
 	ifmsh->mesh_id_len = 0;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-	sta_info_flush(local, NULL);
+
+	/* flush STAs and mpaths on this iface */
+	sta_info_flush(sdata->local, sdata);
+	mesh_path_flush_by_iface(sdata);
 
 	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+	del_timer_sync(&sdata->u.mesh.mesh_path_timer);
 	/*
 	 * If the timer fired while we waited for it, it will have
 	 * requeued the work. Now the work will be running again
@@ -634,6 +650,8 @@
 	local->fif_other_bss--;
 	atomic_dec(&local->iff_allmultis);
 	ieee80211_configure_filter(local);
+
+	sdata->u.mesh.timers_running = 0;
 }
 
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index faaa39b..25d0f17 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -215,6 +215,9 @@
 /* Maximum number of paths per interface */
 #define MESH_MAX_MPATHS		1024
 
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN	10
+
 /* Public interfaces */
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -282,7 +285,7 @@
 			   u8 *hw_addr,
 			   struct ieee802_11_elems *ie);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
 void mesh_plink_deactivate(struct sta_info *sta);
 int mesh_plink_open(struct sta_info *sta);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 494bc39..47aeee2 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -17,8 +17,6 @@
 #define MAX_METRIC	0xffffffff
 #define ARITH_SHIFT	8
 
-/* Number of frames buffered per destination for unresolved destinations */
-#define MESH_FRAME_QUEUE_LEN	10
 #define MAX_PREQ_QUEUE_LEN	64
 
 /* Destination only */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 075bc53..aa74981 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -203,23 +203,17 @@
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
-	struct sk_buff_head tmpq;
 	unsigned long flags;
 
 	rcu_assign_pointer(mpath->next_hop, sta);
 
-	__skb_queue_head_init(&tmpq);
-
 	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+	skb_queue_walk(&mpath->frame_queue, skb) {
 		hdr = (struct ieee80211_hdr *) skb->data;
 		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
 		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
-		__skb_queue_tail(&tmpq, skb);
 	}
 
-	skb_queue_splice(&tmpq, &mpath->frame_queue);
 	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
@@ -285,40 +279,42 @@
 				    struct mesh_path *from_mpath,
 				    bool copy)
 {
-	struct sk_buff *skb, *cp_skb = NULL;
-	struct sk_buff_head gateq, failq;
+	struct sk_buff *skb, *fskb, *tmp;
+	struct sk_buff_head failq;
 	unsigned long flags;
-	int num_skbs;
 
 	BUG_ON(gate_mpath == from_mpath);
 	BUG_ON(!gate_mpath->next_hop);
 
-	__skb_queue_head_init(&gateq);
 	__skb_queue_head_init(&failq);
 
 	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
 	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
 	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 
-	num_skbs = skb_queue_len(&failq);
-
-	while (num_skbs--) {
-		skb = __skb_dequeue(&failq);
-		if (copy) {
-			cp_skb = skb_copy(skb, GFP_ATOMIC);
-			if (cp_skb)
-				__skb_queue_tail(&failq, cp_skb);
+	skb_queue_walk_safe(&failq, fskb, tmp) {
+		if (skb_queue_len(&gate_mpath->frame_queue) >=
+				  MESH_FRAME_QUEUE_LEN) {
+			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+			break;
 		}
 
+		skb = skb_copy(fskb, GFP_ATOMIC);
+		if (WARN_ON(!skb))
+			break;
+
 		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
-		__skb_queue_tail(&gateq, skb);
+		skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+		if (copy)
+			continue;
+
+		__skb_unlink(fskb, &failq);
+		kfree_skb(fskb);
 	}
 
-	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
-	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
 	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
 		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
-	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
 
 	if (!copy)
 		return;
@@ -531,7 +527,7 @@
 
 	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
-	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+	eth_broadcast_addr(new_mpath->rann_snd_addr);
 	new_mpath->is_root = false;
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index af671b9..9d7ad36 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -48,17 +48,17 @@
 		u8 *da, __le16 llid, __le16 plid, __le16 reason);
 
 static inline
-void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
 	atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
-	mesh_accept_plinks_update(sdata);
+	return mesh_accept_plinks_update(sdata);
 }
 
 static inline
-void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
 	atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
-	mesh_accept_plinks_update(sdata);
+	return mesh_accept_plinks_update(sdata);
 }
 
 /**
@@ -117,7 +117,7 @@
 	u16 ht_opmode;
 	bool non_ht_sta = false, ht20_sta = false;
 
-	if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+	if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
 		return 0;
 
 	rcu_read_lock();
@@ -147,7 +147,8 @@
 
 	if (non_ht_sta)
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
-	else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+	else if (ht20_sta &&
+		 sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
 	else
 		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -170,22 +171,21 @@
  * @sta: mesh peer link to deactivate
  *
  * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->lock
  */
-static bool __mesh_plink_deactivate(struct sta_info *sta)
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	bool deactivated = false;
+	u32 changed = 0;
 
-	if (sta->plink_state == NL80211_PLINK_ESTAB) {
-		mesh_plink_dec_estab_count(sdata);
-		deactivated = true;
-	}
+	if (sta->plink_state == NL80211_PLINK_ESTAB)
+		changed = mesh_plink_dec_estab_count(sdata);
 	sta->plink_state = NL80211_PLINK_BLOCKED;
 	mesh_path_flush_by_nexthop(sta);
 
-	return deactivated;
+	return changed;
 }
 
 /**
@@ -198,18 +198,17 @@
 void mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	bool deactivated;
+	u32 changed;
 
 	spin_lock_bh(&sta->lock);
-	deactivated = __mesh_plink_deactivate(sta);
+	changed = __mesh_plink_deactivate(sta);
 	sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
 	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 			    sta->sta.addr, sta->llid, sta->plid,
 			    sta->reason);
 	spin_unlock_bh(&sta->lock);
 
-	if (deactivated)
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+	ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -217,12 +216,14 @@
 		u8 *da, __le16 llid, __le16 plid, __le16 reason) {
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
+	struct ieee80211_tx_info *info;
 	struct ieee80211_mgmt *mgmt;
 	bool include_plid = false;
 	u16 peering_proto = 0;
 	u8 *pos, ie_len = 4;
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
 		      sizeof(mgmt->u.action.u.self_prot);
+	int err = -ENOMEM;
 
 	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
@@ -238,6 +239,7 @@
 			    sdata->u.mesh.ie_len);
 	if (!skb)
 		return -1;
+	info = IEEE80211_SKB_CB(skb);
 	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
@@ -258,15 +260,18 @@
 			pos = skb_put(skb, 2);
 			memcpy(pos + 2, &plid, 2);
 		}
-		if (ieee80211_add_srates_ie(sdata, skb, true) ||
-		    ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+		if (ieee80211_add_srates_ie(sdata, skb, true,
+					    local->oper_channel->band) ||
+		    ieee80211_add_ext_srates_ie(sdata, skb, true,
+						local->oper_channel->band) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
 		    mesh_add_meshid_ie(skb, sdata) ||
 		    mesh_add_meshconf_ie(skb, sdata))
-			return -1;
+			goto free;
 	} else {	/* WLAN_SP_MESH_PEERING_CLOSE */
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
 		if (mesh_add_meshid_ie(skb, sdata))
-			return -1;
+			goto free;
 	}
 
 	/* Add Mesh Peering Management element */
@@ -285,11 +290,12 @@
 		ie_len += 2;	/* reason code */
 		break;
 	default:
-		return -EINVAL;
+		err = -EINVAL;
+		goto free;
 	}
 
 	if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
-		return -ENOMEM;
+		goto free;
 
 	pos = skb_put(skb, 2 + ie_len);
 	*pos++ = WLAN_EID_PEER_MGMT;
@@ -310,14 +316,17 @@
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
 		if (mesh_add_ht_cap_ie(skb, sdata) ||
 		    mesh_add_ht_oper_ie(skb, sdata))
-			return -1;
+			goto free;
 	}
 
 	if (mesh_add_vendor_ies(skb, sdata))
-		return -1;
+		goto free;
 
 	ieee80211_tx_skb(sdata, skb);
 	return 0;
+free:
+	kfree_skb(skb);
+	return err;
 }
 
 /**
@@ -362,9 +371,14 @@
 
 	spin_lock_bh(&sta->lock);
 	sta->last_rx = jiffies;
+	if (sta->plink_state == NL80211_PLINK_ESTAB) {
+		spin_unlock_bh(&sta->lock);
+		return sta;
+	}
+
 	sta->sta.supp_rates[band] = rates;
 	if (elems->ht_cap_elem &&
-	    sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+	    sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 						  elems->ht_cap_elem,
 						  &sta->sta.ht_cap);
@@ -541,15 +555,14 @@
 void mesh_plink_block(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	bool deactivated;
+	u32 changed;
 
 	spin_lock_bh(&sta->lock);
-	deactivated = __mesh_plink_deactivate(sta);
+	changed = __mesh_plink_deactivate(sta);
 	sta->plink_state = NL80211_PLINK_BLOCKED;
 	spin_unlock_bh(&sta->lock);
 
-	if (deactivated)
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+	ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 
@@ -852,9 +865,8 @@
 			del_timer(&sta->plink_timer);
 			sta->plink_state = NL80211_PLINK_ESTAB;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_inc_estab_count(sdata);
+			changed |= mesh_plink_inc_estab_count(sdata);
 			changed |= mesh_set_ht_prot_mode(sdata);
-			changed |= BSS_CHANGED_BEACON;
 			mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			break;
@@ -888,9 +900,8 @@
 			del_timer(&sta->plink_timer);
 			sta->plink_state = NL80211_PLINK_ESTAB;
 			spin_unlock_bh(&sta->lock);
-			mesh_plink_inc_estab_count(sdata);
+			changed |= mesh_plink_inc_estab_count(sdata);
 			changed |= mesh_set_ht_prot_mode(sdata);
-			changed |= BSS_CHANGED_BEACON;
 			mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			mesh_plink_frame_tx(sdata,
@@ -908,13 +919,12 @@
 		case CLS_ACPT:
 			reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
 			sta->reason = reason;
-			__mesh_plink_deactivate(sta);
+			changed |= __mesh_plink_deactivate(sta);
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			llid = sta->llid;
 			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
 			spin_unlock_bh(&sta->lock);
 			changed |= mesh_set_ht_prot_mode(sdata);
-			changed |= BSS_CHANGED_BEACON;
 			mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 					    sta->sta.addr, llid, plid, reason);
 			break;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e..a8cf70b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -146,6 +146,9 @@
 	if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
 		return;
 
+	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+		return;
+
 	mod_timer(&sdata->u.mgd.bcn_mon_timer,
 		  round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
 }
@@ -182,15 +185,15 @@
 	u16 ht_opmode;
 	bool disable_40 = false;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[local->oper_channel->band];
 
 	switch (sdata->vif.bss_conf.channel_type) {
 	case NL80211_CHAN_HT40PLUS:
-		if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+		if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
 			disable_40 = true;
 		break;
 	case NL80211_CHAN_HT40MINUS:
-		if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+		if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
 			disable_40 = true;
 		break;
 	default:
@@ -326,6 +329,26 @@
 	ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+				 struct sk_buff *skb,
+				 struct ieee80211_supported_band *sband)
+{
+	u8 *pos;
+	u32 cap;
+	struct ieee80211_sta_vht_cap vht_cap;
+
+	BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
+
+	memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+
+	/* determine capability flags */
+	cap = vht_cap.cap;
+
+	/* reserve and fill IE */
+	pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+	ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
+}
+
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
@@ -371,6 +394,7 @@
 			4 + /* power capability */
 			2 + 2 * sband->n_channels + /* supported channels */
 			2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+			2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
 			assoc_data->ie_len + /* extra IEs */
 			9, /* WMM */
 			GFP_KERNEL);
@@ -503,6 +527,9 @@
 		ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
 				    sband, local->oper_channel, ifmgd->ap_smps);
 
+	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+		ieee80211_add_vht_ie(sdata, skb, sband);
+
 	/* if present, add any custom non-vendor IEs that go after HT */
 	if (assoc_data->ie_len && assoc_data->ie) {
 		noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -583,8 +610,6 @@
 			IEEE80211_SKB_CB(skb)->flags |=
 				IEEE80211_TX_INTFL_DONT_ENCRYPT;
 
-		drv_mgd_prepare_tx(local, sdata);
-
 		ieee80211_tx_skb(sdata, skb);
 	}
 }
@@ -687,6 +712,7 @@
 	/* XXX: shouldn't really modify cfg80211-owned data! */
 	ifmgd->associated->channel = sdata->local->oper_channel;
 
+	/* XXX: wait for a beacon first? */
 	ieee80211_wake_queues_by_reason(&sdata->local->hw,
 					IEEE80211_QUEUE_STOP_REASON_CSA);
  out:
@@ -763,36 +789,32 @@
 
 	sdata->local->csa_channel = new_ch;
 
+	ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+
+	if (sw_elem->mode)
+		ieee80211_stop_queues_by_reason(&sdata->local->hw,
+				IEEE80211_QUEUE_STOP_REASON_CSA);
+
 	if (sdata->local->ops->channel_switch) {
 		/* use driver's channel switch callback */
-		struct ieee80211_channel_switch ch_switch;
-		memset(&ch_switch, 0, sizeof(ch_switch));
-		ch_switch.timestamp = timestamp;
-		if (sw_elem->mode) {
-			ch_switch.block_tx = true;
-			ieee80211_stop_queues_by_reason(&sdata->local->hw,
-					IEEE80211_QUEUE_STOP_REASON_CSA);
-		}
-		ch_switch.channel = new_ch;
-		ch_switch.count = sw_elem->count;
-		ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+		struct ieee80211_channel_switch ch_switch = {
+			.timestamp = timestamp,
+			.block_tx = sw_elem->mode,
+			.channel = new_ch,
+			.count = sw_elem->count,
+		};
+
 		drv_channel_switch(sdata->local, &ch_switch);
 		return;
 	}
 
 	/* channel switch handled in software */
-	if (sw_elem->count <= 1) {
+	if (sw_elem->count <= 1)
 		ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
-	} else {
-		if (sw_elem->mode)
-			ieee80211_stop_queues_by_reason(&sdata->local->hw,
-					IEEE80211_QUEUE_STOP_REASON_CSA);
-		ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+	else
 		mod_timer(&ifmgd->chswitch_timer,
-			  jiffies +
-			  msecs_to_jiffies(sw_elem->count *
-					   cbss->beacon_interval));
-	}
+			  TU_TO_EXP_TIME(sw_elem->count *
+					 cbss->beacon_interval));
 }
 
 static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
@@ -1007,6 +1029,16 @@
 	ieee80211_change_ps(local);
 }
 
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
+{
+	bool ps_allowed = ieee80211_powersave_allowed(sdata);
+
+	if (sdata->vif.bss_conf.ps != ps_allowed) {
+		sdata->vif.bss_conf.ps = ps_allowed;
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+	}
+}
+
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
 {
 	struct ieee80211_local *local =
@@ -1239,7 +1271,7 @@
 	}
 
 	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-	if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+	if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
 		use_short_slot = true;
 
 	if (use_protection != bss_conf->use_cts_prot) {
@@ -1310,6 +1342,8 @@
 	ieee80211_recalc_smps(local);
 	mutex_unlock(&local->iflist_mtx);
 
+	ieee80211_recalc_ps_vif(sdata);
+
 	netif_tx_start_all_queues(sdata->dev);
 	netif_carrier_on(sdata->dev);
 }
@@ -1371,6 +1405,9 @@
 	}
 	local->ps_sdata = NULL;
 
+	/* disable per-vif ps */
+	ieee80211_recalc_ps_vif(sdata);
+
 	/* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
 	if (tx)
 		drv_flush(local, false);
@@ -1430,6 +1467,8 @@
 	del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
 	del_timer_sync(&sdata->u.mgd.timer);
 	del_timer_sync(&sdata->u.mgd.chswitch_timer);
+
+	sdata->u.mgd.timers_running = 0;
 }
 
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1540,7 +1579,8 @@
 			ssid_len = ssid[1];
 
 		ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
-					 0, (u32) -1, true, false);
+					 0, (u32) -1, true, false,
+					 ifmgd->associated->channel);
 	}
 
 	ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1643,7 +1683,9 @@
 		ssid_len = ssid[1];
 
 	skb = ieee80211_build_probe_req(sdata, cbss->bssid,
-					(u32) -1, ssid + 2, ssid_len,
+					(u32) -1,
+					sdata->local->oper_channel,
+					ssid + 2, ssid_len,
 					NULL, 0, true);
 
 	return skb;
@@ -1654,7 +1696,6 @@
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
-	u8 bssid[ETH_ALEN];
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
 	mutex_lock(&ifmgd->mtx);
@@ -1663,9 +1704,8 @@
 		return;
 	}
 
-	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
-	sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
+	sdata_info(sdata, "Connection to AP %pM lost\n",
+		   ifmgd->associated->bssid);
 
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1683,7 +1723,7 @@
 	mutex_unlock(&local->mtx);
 }
 
-void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
@@ -2230,14 +2270,10 @@
 		mutex_unlock(&local->iflist_mtx);
 	}
 
-	if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
-	    (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
-							ETH_ALEN) == 0)) {
-		struct ieee80211_channel_sw_ie *sw_elem =
-			(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
-		ieee80211_sta_process_chanswitch(sdata, sw_elem,
+	if (elems->ch_switch_ie &&
+	    memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
+		ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
 						 bss, rx_status->mactime);
-	}
 }
 
 
@@ -2324,7 +2360,7 @@
 	if (baselen > len)
 		return;
 
-	if (rx_status->freq != local->hw.conf.channel->center_freq)
+	if (rx_status->freq != local->oper_channel->center_freq)
 		return;
 
 	if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2488,7 +2524,7 @@
 	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
 		struct ieee80211_supported_band *sband;
 
-		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+		sband = local->hw.wiphy->bands[local->oper_channel->band];
 
 		changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
 						  bssid, true);
@@ -2671,7 +2707,8 @@
 		 * will not answer to direct packet in unassociated state.
 		 */
 		ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
-					 NULL, 0, (u32) -1, true, false);
+					 NULL, 0, (u32) -1, true, false,
+					 auth_data->bss->channel);
 	}
 
 	auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -2998,41 +3035,17 @@
 	return 0;
 }
 
-static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-				     struct cfg80211_bss *cbss, bool assoc)
+static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+				  struct cfg80211_bss *cbss)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_bss *bss = (void *)cbss->priv;
-	struct sta_info *sta = NULL;
-	bool have_sta = false;
-	int err;
 	int ht_cfreq;
 	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
 	const u8 *ht_oper_ie;
 	const struct ieee80211_ht_operation *ht_oper = NULL;
 	struct ieee80211_supported_band *sband;
 
-	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
-		return -EINVAL;
-
-	if (assoc) {
-		rcu_read_lock();
-		have_sta = sta_info_get(sdata, cbss->bssid);
-		rcu_read_unlock();
-	}
-
-	if (!have_sta) {
-		sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
-		if (!sta)
-			return -ENOMEM;
-	}
-
-	mutex_lock(&local->mtx);
-	ieee80211_recalc_idle(sdata->local);
-	mutex_unlock(&local->mtx);
-
-	/* switch to the right channel */
 	sband = local->hw.wiphy->bands[cbss->channel->band];
 
 	ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3095,10 +3108,51 @@
 	local->oper_channel = cbss->channel;
 	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-	if (sta) {
+	return 0;
+}
+
+static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+				     struct cfg80211_bss *cbss, bool assoc)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct ieee80211_bss *bss = (void *)cbss->priv;
+	struct sta_info *new_sta = NULL;
+	bool have_sta = false;
+	int err;
+
+	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+		return -EINVAL;
+
+	if (assoc) {
+		rcu_read_lock();
+		have_sta = sta_info_get(sdata, cbss->bssid);
+		rcu_read_unlock();
+	}
+
+	if (!have_sta) {
+		new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+		if (!new_sta)
+			return -ENOMEM;
+	}
+
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(sdata->local);
+	mutex_unlock(&local->mtx);
+
+	if (new_sta) {
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
+		struct ieee80211_supported_band *sband;
+
+		sband = local->hw.wiphy->bands[cbss->channel->band];
+
+		err = ieee80211_prep_channel(sdata, cbss);
+		if (err) {
+			sta_info_free(local, new_sta);
+			return err;
+		}
 
 		ieee80211_get_rates(sband, bss->supp_rates,
 				    bss->supp_rates_len,
@@ -3120,7 +3174,7 @@
 			basic_rates = BIT(min_rate_index);
 		}
 
-		sta->sta.supp_rates[cbss->channel->band] = rates;
+		new_sta->sta.supp_rates[cbss->channel->band] = rates;
 		sdata->vif.bss_conf.basic_rates = basic_rates;
 
 		/* cf. IEEE 802.11 9.2.12 */
@@ -3143,10 +3197,10 @@
 			BSS_CHANGED_BEACON_INT);
 
 		if (assoc)
-			sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+			sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
 
-		err = sta_info_insert(sta);
-		sta = NULL;
+		err = sta_info_insert(new_sta);
+		new_sta = NULL;
 		if (err) {
 			sdata_info(sdata,
 				   "failed to insert STA entry for the AP (error %d)\n",
@@ -3298,9 +3352,13 @@
 	}
 
 	/* prepare assoc data */
-
-	ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
-	ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+	
+	/*
+	 * keep only the 40 MHz disable bit set as it might have
+	 * been set during authentication already, all other bits
+	 * should be reset for a new connection
+	 */
+	ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
 
 	ifmgd->beacon_crc_valid = false;
 
@@ -3316,21 +3374,34 @@
 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
 		    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
 			ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+			ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
 			netdev_info(sdata->dev,
-				    "disabling HT due to WEP/TKIP use\n");
+				    "disabling HT/VHT due to WEP/TKIP use\n");
 		}
 	}
 
-	if (req->flags & ASSOC_REQ_DISABLE_HT)
+	if (req->flags & ASSOC_REQ_DISABLE_HT) {
 		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+	}
 
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
 	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
 		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
-		netdev_info(sdata->dev,
-			    "disabling HT as WMM/QoS is not supported\n");
+		if (!bss->wmm_used)
+			netdev_info(sdata->dev,
+				    "disabling HT as WMM/QoS is not supported by the AP\n");
+	}
+
+	/* disable VHT if we don't support it or the AP doesn't use WMM */
+	if (!sband->vht_cap.vht_supported ||
+	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+		if (!bss->wmm_used)
+			netdev_info(sdata->dev,
+				    "disabling VHT as WMM/QoS is not supported by the AP\n");
 	}
 
 	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3465,14 +3536,17 @@
 		   req->bssid, req->reason_code);
 
 	if (ifmgd->associated &&
-	    ether_addr_equal(ifmgd->associated->bssid, req->bssid))
+	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
 		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 				       req->reason_code, true, frame_buf);
-	else
+	} else {
+		drv_mgd_prepare_tx(sdata->local, sdata);
 		ieee80211_send_deauth_disassoc(sdata, req->bssid,
 					       IEEE80211_STYPE_DEAUTH,
 					       req->reason_code, true,
 					       frame_buf);
+	}
+
 	mutex_unlock(&ifmgd->mtx);
 
 	__cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 635c325..507121d 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -116,6 +116,9 @@
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
+		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+			continue;
+
 		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
 			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -144,6 +147,9 @@
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+			continue;
+
 		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
 			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 6e4fd32..10de668 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -56,7 +56,7 @@
 	if (!ref)
 		return;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[local->oper_channel->band];
 
 	ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
 	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0cb4ede..b382605 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -60,7 +60,9 @@
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
+			    RX_FLAG_FAILED_PLCP_CRC |
+			    RX_FLAG_AMPDU_IS_ZEROLEN))
 		return 1;
 	if (unlikely(skb->len < 16 + present_fcs_len))
 		return 1;
@@ -91,6 +93,13 @@
 	if (status->flag & RX_FLAG_HT) /* HT info */
 		len += 3;
 
+	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+		/* padding */
+		while (len & 3)
+			len++;
+		len += 8;
+	}
+
 	return len;
 }
 
@@ -215,6 +224,37 @@
 		pos++;
 		*pos++ = status->rate_idx;
 	}
+
+	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+		u16 flags = 0;
+
+		/* ensure 4 byte alignment */
+		while ((pos - (u8 *)rthdr) & 3)
+			pos++;
+		rthdr->it_present |=
+			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
+		put_unaligned_le32(status->ampdu_reference, pos);
+		pos += 4;
+		if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
+			flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
+		if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
+			flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
+		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
+			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
+			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
+			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
+		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
+		put_unaligned_le16(flags, pos);
+		pos += 2;
+		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+			*pos++ = status->ampdu_delimiter_crc;
+		else
+			*pos++ = 0;
+		*pos++ = 0;
+	}
 }
 
 /*
@@ -2268,7 +2308,7 @@
 
 		goto queue;
 	case WLAN_CATEGORY_SPECTRUM_MGMT:
-		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+		if (status->band != IEEE80211_BAND_5GHZ)
 			break;
 
 		if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@
 		if (!bssid) {
 			if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
 				return 0;
-		} else if (!ieee80211_bssid_match(bssid,
-					sdata->vif.addr)) {
+		} else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
 			/*
 			 * Accept public action frames even when the
 			 * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@
 		if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
 			return 0;
 		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		if (!ieee80211_is_public_action(hdr, skb->len) &&
+		    !ieee80211_is_probe_req(hdr->frame_control) &&
+		    !ieee80211_is_probe_resp(hdr->frame_control) &&
+		    !ieee80211_is_beacon(hdr->frame_control))
+			return 0;
+		if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
+			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+		break;
 	default:
 		/* should never get here */
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 		break;
 	}
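
The radiotap hunks earlier in this rx.c diff append an IEEE80211_RADIOTAP_AMPDU_STATUS field to monitor-mode frames: the write position is padded to a 4-byte boundary, then a 32-bit A-MPDU reference, a 16-bit flags word, the delimiter CRC byte and a reserved byte are emitted (8 bytes total). A minimal user-space sketch of that packing follows; the flag values are illustrative placeholders, the authoritative ones live in ieee80211_radiotap.h.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    /* Placeholder bit values for the A-MPDU flags word. */
    #define AMPDU_REPORT_ZEROLEN  0x0001
    #define AMPDU_DELIM_CRC_KNOWN 0x0020

    static size_t put_ampdu_status(uint8_t *buf, size_t pos, uint32_t reference,
                                   uint16_t flags, uint8_t delim_crc)
    {
        /* Radiotap fields are little-endian and naturally aligned:
         * pad up to a 4-byte boundary first, like the kernel's while loop. */
        while (pos & 3)
            buf[pos++] = 0;

        uint32_t ref = reference;      /* assume a little-endian host for brevity */
        memcpy(buf + pos, &ref, 4);    pos += 4;
        memcpy(buf + pos, &flags, 2);  pos += 2;
        buf[pos++] = (flags & AMPDU_DELIM_CRC_KNOWN) ? delim_crc : 0;
        buf[pos++] = 0;                /* reserved */
        return pos;
    }

    int main(void)
    {
        uint8_t rthdr[64] = { 0 };
        size_t end = put_ampdu_status(rthdr, 13 /* deliberately unaligned */,
                                      0x12345678, AMPDU_REPORT_ZEROLEN, 0xab);
        printf("A-MPDU status field ends at offset %zu\n", end);
        return 0;
    }
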
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d..740e414 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@
 	if (local->scan_req != local->int_scan_req)
 		cfg80211_scan_done(local->scan_req, aborted);
 	local->scan_req = NULL;
-	local->scan_sdata = NULL;
+	rcu_assign_pointer(local->scan_sdata, NULL);
 
 	local->scanning = 0;
 	local->scan_channel = NULL;
@@ -416,7 +416,8 @@
 			local->scan_req->ssids[i].ssid_len,
 			local->scan_req->ie, local->scan_req->ie_len,
 			local->scan_req->rates[band], false,
-			local->scan_req->no_cck);
+			local->scan_req->no_cck,
+			local->hw.conf.channel);
 
 	/*
 	 * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@
 	if (local->ops->hw_scan) {
 		__set_bit(SCAN_HW_SCANNING, &local->scanning);
 	} else if ((req->n_channels == 1) &&
-		   (req->channels[0]->center_freq ==
-		    local->hw.conf.channel->center_freq)) {
-
-		/* If we are scanning only on the current channel, then
-		 * we do not need to stop normal activities
+		   (req->channels[0] == local->oper_channel)) {
+		/*
+		 * If we are scanning only on the operating channel
+		 * then we do not need to stop normal activities
 		 */
 		unsigned long next_delay;
 
@@ -984,7 +984,6 @@
 			kfree(local->sched_scan_ies.ie[i]);
 
 		drv_sched_scan_stop(local, sdata);
-		rcu_assign_pointer(local->sched_scan_sdata, NULL);
 	}
 out:
 	mutex_unlock(&local->mtx);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8cd7291..b0801b7 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -519,19 +519,27 @@
 		u64 cookie = (unsigned long)skb;
 		acked = info->flags & IEEE80211_TX_STAT_ACK;
 
-		/*
-		 * TODO: When we have non-netdev frame TX,
-		 * we cannot use skb->dev->ieee80211_ptr
-		 */
-
 		if (ieee80211_is_nullfunc(hdr->frame_control) ||
-		    ieee80211_is_qos_nullfunc(hdr->frame_control))
+		    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 			cfg80211_probe_status(skb->dev, hdr->addr1,
 					      cookie, acked, GFP_ATOMIC);
-		else
+		} else if (skb->dev) {
 			cfg80211_mgmt_tx_status(
 				skb->dev->ieee80211_ptr, cookie, skb->data,
 				skb->len, acked, GFP_ATOMIC);
+		} else {
+			struct ieee80211_sub_if_data *p2p_sdata;
+
+			rcu_read_lock();
+
+			p2p_sdata = rcu_dereference(local->p2p_sdata);
+			if (p2p_sdata) {
+				cfg80211_mgmt_tx_status(
+					&p2p_sdata->wdev, cookie, skb->data,
+					skb->len, acked, GFP_ATOMIC);
+			}
+			rcu_read_unlock();
+		}
 	}
 
 	if (unlikely(info->ack_frame_id)) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c6d33b5..18d9c8a 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -24,7 +24,7 @@
 			__string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
 #define VIF_ASSIGN	__entry->vif_type = sdata->vif.type; __entry->sdata = sdata;	\
 			__entry->p2p = sdata->vif.p2p;					\
-			__assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+			__assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
 #define VIF_PR_FMT	" vif:%s(%d%s)"
 #define VIF_PR_ARG	__get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
@@ -274,9 +274,12 @@
 		__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
 		__entry->max_sleep_period = local->hw.conf.max_sleep_period;
 		__entry->listen_interval = local->hw.conf.listen_interval;
-		__entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
-		__entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
-		__entry->center_freq = local->hw.conf.channel->center_freq;
+		__entry->long_frame_max_tx_count =
+			local->hw.conf.long_frame_max_tx_count;
+		__entry->short_frame_max_tx_count =
+			local->hw.conf.short_frame_max_tx_count;
+		__entry->center_freq = local->hw.conf.channel ?
+					local->hw.conf.channel->center_freq : 0;
 		__entry->channel_type = local->hw.conf.channel_type;
 		__entry->smps = local->hw.conf.smps_mode;
 	),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712f..3b807bc 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -55,7 +55,7 @@
 	if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
 		return 0;
 
-	sband = local->hw.wiphy->bands[tx->channel->band];
+	sband = local->hw.wiphy->bands[info->band];
 	txrate = &sband->bitrates[info->control.rates[0].idx];
 
 	erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -615,7 +615,7 @@
 
 	memset(&txrc, 0, sizeof(txrc));
 
-	sband = tx->local->hw.wiphy->bands[tx->channel->band];
+	sband = tx->local->hw.wiphy->bands[info->band];
 
 	len = min_t(u32, tx->skb->len + FCS_LEN,
 			 tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@
 	txrc.bss_conf = &tx->sdata->vif.bss_conf;
 	txrc.skb = tx->skb;
 	txrc.reported_rate.idx = -1;
-	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
 	if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
 		txrc.max_rate_idx = -1;
 	else
 		txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
 	memcpy(txrc.rate_idx_mcs_mask,
-	       tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+	       tx->sdata->rc_rateidx_mcs_mask[info->band],
 	       sizeof(txrc.rate_idx_mcs_mask));
 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@
 		 "scanning and associated. Target station: "
 		 "%pM on %d GHz band\n",
 		 tx->sdata->name, hdr->addr1,
-		 tx->channel->band ? 5 : 2))
+		 info->band ? 5 : 2))
 		return TX_DROP;
 
 	/*
@@ -1131,7 +1131,6 @@
 	tx->skb = skb;
 	tx->local = local;
 	tx->sdata = sdata;
-	tx->channel = local->hw.conf.channel;
 	__skb_queue_head_init(&tx->skbs);
 
 	/*
@@ -1204,6 +1203,7 @@
 			       struct sk_buff_head *skbs,
 			       bool txpending)
 {
+	struct ieee80211_tx_control control;
 	struct sk_buff *skb, *tmp;
 	unsigned long flags;
 
@@ -1240,10 +1240,10 @@
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
 		info->control.vif = vif;
-		info->control.sta = sta;
+		control.sta = sta;
 
 		__skb_unlink(skb, skbs);
-		drv_tx(local, skb);
+		drv_tx(local, &control, skb);
 	}
 
 	return true;
@@ -1399,8 +1399,7 @@
 		goto out;
 	}
 
-	tx.channel = local->hw.conf.channel;
-	info->band = tx.channel->band;
+	info->band = local->hw.conf.channel->band;
 
 	/* set up hw_queue value early */
 	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_info *info;
-	int ret = NETDEV_TX_BUSY, head_need;
+	int head_need;
 	u16 ethertype, hdrlen,  meshhdrlen = 0;
 	__le16 fc;
 	struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@
 	u32 info_flags = 0;
 	u16 info_id = 0;
 
-	if (unlikely(skb->len < ETH_HLEN)) {
-		ret = NETDEV_TX_OK;
+	if (unlikely(skb->len < ETH_HLEN))
 		goto fail;
-	}
 
 	/* convert Ethernet header to proper 802.11 header (based on
 	 * operation mode) */
@@ -1787,7 +1784,6 @@
 		if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
 			/* Do not send frames with mesh_ttl == 0 */
 			sdata->u.mesh.mshstats.dropped_frames_ttl++;
-			ret = NETDEV_TX_OK;
 			goto fail;
 		}
 		rcu_read_lock();
@@ -1880,10 +1876,8 @@
 
 		if (tdls_direct) {
 			/* link during setup - throw out frames to peer */
-			if (!tdls_auth) {
-				ret = NETDEV_TX_OK;
+			if (!tdls_auth)
 				goto fail;
-			}
 
 			/* DA SA BSSID */
 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1917,7 +1911,6 @@
 		hdrlen = 24;
 		break;
 	default:
-		ret = NETDEV_TX_OK;
 		goto fail;
 	}
 
@@ -1962,7 +1955,6 @@
 
 		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 
-		ret = NETDEV_TX_OK;
 		goto fail;
 	}
 
@@ -2017,10 +2009,8 @@
 		skb = skb_clone(skb, GFP_ATOMIC);
 		kfree_skb(tmp_skb);
 
-		if (!skb) {
-			ret = NETDEV_TX_OK;
+		if (!skb)
 			goto fail;
-		}
 	}
 
 	hdr.frame_control = fc;
@@ -2123,10 +2113,8 @@
 	return NETDEV_TX_OK;
 
  fail:
-	if (ret == NETDEV_TX_OK)
-		dev_kfree_skb(skb);
-
-	return ret;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
@@ -2301,12 +2289,9 @@
 	struct ieee80211_sub_if_data *sdata = NULL;
 	struct ieee80211_if_ap *ap = NULL;
 	struct beacon_data *beacon;
-	struct ieee80211_supported_band *sband;
-	enum ieee80211_band band = local->hw.conf.channel->band;
+	enum ieee80211_band band = local->oper_channel->band;
 	struct ieee80211_tx_rate_control txrc;
 
-	sband = local->hw.wiphy->bands[band];
-
 	rcu_read_lock();
 
 	sdata = vif_to_sdata(vif);
@@ -2416,7 +2401,7 @@
 		memset(mgmt, 0, hdr_len);
 		mgmt->frame_control =
 		    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-		memset(mgmt->da, 0xff, ETH_ALEN);
+		eth_broadcast_addr(mgmt->da);
 		memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 		mgmt->u.beacon.beacon_int =
@@ -2428,9 +2413,9 @@
 		*pos++ = WLAN_EID_SSID;
 		*pos++ = 0x0;
 
-		if (ieee80211_add_srates_ie(sdata, skb, true) ||
+		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 		    mesh_add_ds_params_ie(skb, sdata) ||
-		    ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
 		    mesh_add_ht_cap_ie(skb, sdata) ||
 		    mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2453,12 +2438,12 @@
 
 	memset(&txrc, 0, sizeof(txrc));
 	txrc.hw = hw;
-	txrc.sband = sband;
+	txrc.sband = local->hw.wiphy->bands[band];
 	txrc.bss_conf = &sdata->vif.bss_conf;
 	txrc.skb = skb;
 	txrc.reported_rate.idx = -1;
 	txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
-	if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+	if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
 		txrc.max_rate_idx = -1;
 	else
 		txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2482,7 +2467,8 @@
 					struct ieee80211_vif *vif)
 {
 	struct ieee80211_if_ap *ap = NULL;
-	struct sk_buff *presp = NULL, *skb = NULL;
+	struct sk_buff *skb = NULL;
+	struct probe_resp *presp = NULL;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
@@ -2496,10 +2482,12 @@
 	if (!presp)
 		goto out;
 
-	skb = skb_copy(presp, GFP_ATOMIC);
+	skb = dev_alloc_skb(presp->len);
 	if (!skb)
 		goto out;
 
+	memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+
 	hdr = (struct ieee80211_hdr *) skb->data;
 	memset(hdr->addr1, 0, sizeof(hdr->addr1));
 
@@ -2610,9 +2598,9 @@
 	memset(hdr, 0, sizeof(*hdr));
 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					 IEEE80211_STYPE_PROBE_REQ);
-	memset(hdr->addr1, 0xff, ETH_ALEN);
+	eth_broadcast_addr(hdr->addr1);
 	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-	memset(hdr->addr3, 0xff, ETH_ALEN);
+	eth_broadcast_addr(hdr->addr3);
 
 	pos = skb_put(skb, ie_ssid_len);
 	*pos++ = WLAN_EID_SSID;
@@ -2709,8 +2697,7 @@
 	info = IEEE80211_SKB_CB(skb);
 
 	tx.flags |= IEEE80211_TX_PS_BUFFERED;
-	tx.channel = local->hw.conf.channel;
-	info->band = tx.channel->band;
+	info->band = local->oper_channel->band;
 
 	if (invoke_tx_handlers(&tx))
 		skb = NULL;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 39b82fe..471fb05 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -276,6 +276,9 @@
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		int ac;
 
+		if (!sdata->dev)
+			continue;
+
 		if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
 			continue;
 
@@ -364,6 +367,9 @@
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		int ac;
 
+		if (!sdata->dev)
+			continue;
+
 		for (ac = 0; ac < n_acs; ac++) {
 			if (sdata->vif.hw_queue[ac] == queue ||
 			    sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@
 				elem_parse_failed = true;
 			break;
 		case WLAN_EID_CHANNEL_SWITCH:
-			elems->ch_switch_elem = pos;
-			elems->ch_switch_elem_len = elen;
+			if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+				elem_parse_failed = true;
+				break;
+			}
+			elems->ch_switch_ie = (void *)pos;
 			break;
 		case WLAN_EID_QUIET:
 			if (!elems->quiet_elem) {
@@ -832,7 +841,7 @@
 
 	memset(&qparam, 0, sizeof(qparam));
 
-	use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
+	use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
 		 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 
 	/*
@@ -899,7 +908,8 @@
 		drv_conf_tx(local, sdata, ac, &qparam);
 	}
 
-	if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+	if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
 		sdata->vif.bss_conf.qos = enable_qos;
 		if (bss_notify)
 			ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +929,7 @@
 		if ((supp_rates[i] & 0x7f) * 5 > 110)
 			have_higher_than_11mbit = 1;
 
-	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+	if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
 	    have_higher_than_11mbit)
 		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
 	else
@@ -1100,6 +1110,7 @@
 
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 					  u8 *dst, u32 ratemask,
+					  struct ieee80211_channel *chan,
 					  const u8 *ssid, size_t ssid_len,
 					  const u8 *ie, size_t ie_len,
 					  bool directed)
@@ -1109,7 +1120,7 @@
 	struct ieee80211_mgmt *mgmt;
 	size_t buf_len;
 	u8 *buf;
-	u8 chan;
+	u8 chan_no;
 
 	/* FIXME: come up with a proper value */
 	buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1133,12 @@
 	 * badly-behaved APs don't respond when this parameter is included.
 	 */
 	if (directed)
-		chan = 0;
+		chan_no = 0;
 	else
-		chan = ieee80211_frequency_to_channel(
-			local->hw.conf.channel->center_freq);
+		chan_no = ieee80211_frequency_to_channel(chan->center_freq);
 
-	buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
-					   local->hw.conf.channel->band,
-					   ratemask, chan);
+	buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
+					   ratemask, chan_no);
 
 	skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
 				     ssid, ssid_len,
@@ -1154,11 +1163,13 @@
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
 			      const u8 *ssid, size_t ssid_len,
 			      const u8 *ie, size_t ie_len,
-			      u32 ratemask, bool directed, bool no_cck)
+			      u32 ratemask, bool directed, bool no_cck,
+			      struct ieee80211_channel *channel)
 {
 	struct sk_buff *skb;
 
-	skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+	skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+					ssid, ssid_len,
 					ie, ie_len, directed);
 	if (skb) {
 		if (no_cck)
@@ -1359,7 +1370,8 @@
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
 			changed |= BSS_CHANGED_ASSOC |
-				   BSS_CHANGED_ARP_FILTER;
+				   BSS_CHANGED_ARP_FILTER |
+				   BSS_CHANGED_PS;
 			mutex_lock(&sdata->u.mgd.mtx);
 			ieee80211_bss_info_change_notify(sdata, changed);
 			mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1397,9 @@
 		case NL80211_IFTYPE_MONITOR:
 			/* ignore virtual */
 			break;
+		case NL80211_IFTYPE_P2P_DEVICE:
+			changed = BSS_CHANGED_IDLE;
+			break;
 		case NL80211_IFTYPE_UNSPECIFIED:
 		case NUM_NL80211_IFTYPES:
 		case NL80211_IFTYPE_P2P_CLIENT:
@@ -1571,6 +1586,8 @@
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
 			continue;
+		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+			continue;
 		if (sdata->vif.type != NL80211_IFTYPE_STATION)
 			goto set;
 
@@ -1809,7 +1826,8 @@
 }
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-			    struct sk_buff *skb, bool need_basic)
+			    struct sk_buff *skb, bool need_basic,
+			    enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -1817,7 +1835,7 @@
 	u8 i, rates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[band];
 	rates = sband->n_bitrates;
 	if (rates > 8)
 		rates = 8;
@@ -1840,7 +1858,8 @@
 }
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-				struct sk_buff *skb, bool need_basic)
+				struct sk_buff *skb, bool need_basic,
+				enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -1848,7 +1867,7 @@
 	u8 i, exrates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[band];
 	exrates = sband->n_bitrates;
 	if (exrates > 8)
 		exrates -= 8;
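
ieee80211_build_probe_req() above now takes the scan channel explicitly and derives the DS-parameter channel number from that channel's center frequency instead of reading local->hw.conf.channel. A rough user-space re-implementation of the frequency-to-channel mapping for the two common bands (the in-kernel helper additionally covers the 4.9 GHz public-safety and 60 GHz ranges):

    #include <stdio.h>

    /* Simplified mapping for 2.4 GHz and 5 GHz only. */
    static int freq_to_channel(int mhz)
    {
        if (mhz == 2484)
            return 14;
        if (mhz >= 2412 && mhz < 2484)
            return (mhz - 2407) / 5;
        if (mhz >= 5000)
            return (mhz - 5000) / 5;
        return 0;    /* unknown band */
    }

    int main(void)
    {
        printf("2437 MHz -> channel %d\n", freq_to_channel(2437)); /* 6  */
        printf("5180 MHz -> channel %d\n", freq_to_channel(5180)); /* 36 */
        return 0;
    }
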
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 0bc6b60..8f4b0b2 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -131,14 +131,13 @@
 			int hook_thresh)
 {
 	unsigned int verdict;
+	struct nf_hook_ops *elem = list_entry_rcu(*i, struct nf_hook_ops, list);
 
 	/*
 	 * The caller must not block between calls to this
 	 * function because of risk of continuing from deleted element.
 	 */
-	list_for_each_continue_rcu(*i, head) {
-		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
-
+	list_for_each_entry_continue_rcu(elem, head, list) {
 		if (hook_thresh > elem->priority)
 			continue;
 
@@ -155,11 +154,14 @@
 				continue;
 			}
 #endif
-			if (verdict != NF_REPEAT)
+			if (verdict != NF_REPEAT) {
+				*i = &elem->list;
 				return verdict;
+			}
 			goto repeat;
 		}
 	}
+	*i = &elem->list;
 	return NF_ACCEPT;
 }
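
The nf_iterate() rewrite above switches from list_for_each_continue_rcu(), which walked raw list_head pointers and cast them to struct nf_hook_ops, to list_for_each_entry_continue_rcu(), which resumes from a typed element and recovers the container via the list member; the cursor handed back through *i is now refreshed explicitly before returning. A small single-threaded sketch of that container_of / "continue after this entry" idiom, with RCU omitted:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hook {
        int priority;
        const char *name;
        struct list_head list;
    };

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct hook a = { 10, "defrag" }, b = { 0, "filter" }, c = { 100, "nat" };
        struct list_head *pos = &head;      /* the caller's '*i' cursor */
        int thresh = 5;

        list_add_tail(&a.list, &head);
        list_add_tail(&b.list, &head);
        list_add_tail(&c.list, &head);

        /* Walk from the element after 'pos', skip entries below the threshold,
         * and keep the cursor updated - the same contract nf_iterate() keeps
         * with its '*i' argument. */
        for (struct hook *h = container_of(pos->next, struct hook, list);
             &h->list != &head;
             h = container_of(h->list.next, struct hook, list)) {
            if (h->priority < thresh)
                continue;
            pos = &h->list;
            printf("ran hook %s (prio %d)\n", h->name, h->priority);
        }
        return 0;
    }
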
 
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index f987138..8b2cffd 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -250,7 +250,8 @@
 
 config	IP_VS_FTP
   	tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT
+	depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
+		NF_CONNTRACK_FTP
 	select IP_VS_NFCT
 	---help---
 	  FTP is a protocol that transfers IP address and/or port number in
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 64f9e8f..9713e6e 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -180,22 +180,38 @@
 }
 
 
-/*
- *	ip_vs_app registration routine
- */
-int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+/* Register application for netns */
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	/* increase the module use count */
-	ip_vs_use_count_inc();
+	struct ip_vs_app *a;
+	int err = 0;
+
+	if (!ipvs)
+		return ERR_PTR(-ENOENT);
 
 	mutex_lock(&__ip_vs_app_mutex);
 
-	list_add(&app->a_list, &ipvs->app_list);
+	list_for_each_entry(a, &ipvs->app_list, a_list) {
+		if (!strcmp(app->name, a->name)) {
+			err = -EEXIST;
+			goto out_unlock;
+		}
+	}
+	a = kmemdup(app, sizeof(*app), GFP_KERNEL);
+	if (!a) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+	INIT_LIST_HEAD(&a->incs_list);
+	list_add(&a->a_list, &ipvs->app_list);
+	/* increase the module use count */
+	ip_vs_use_count_inc();
 
+out_unlock:
 	mutex_unlock(&__ip_vs_app_mutex);
 
-	return 0;
+	return err ? ERR_PTR(err) : a;
 }
 
 
@@ -205,20 +221,29 @@
  */
 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
-	struct ip_vs_app *inc, *nxt;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_app *a, *anxt, *inc, *nxt;
+
+	if (!ipvs)
+		return;
 
 	mutex_lock(&__ip_vs_app_mutex);
 
-	list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-		ip_vs_app_inc_release(net, inc);
+	list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
+		if (app && strcmp(app->name, a->name))
+			continue;
+		list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
+			ip_vs_app_inc_release(net, inc);
+		}
+
+		list_del(&a->a_list);
+		kfree(a);
+
+		/* decrease the module use count */
+		ip_vs_use_count_dec();
 	}
 
-	list_del(&app->a_list);
-
 	mutex_unlock(&__ip_vs_app_mutex);
-
-	/* decrease the module use count */
-	ip_vs_use_count_dec();
 }
 
 
@@ -586,5 +611,6 @@
 
 void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
+	unregister_ip_vs_app(net, NULL /* all */);
 	proc_net_remove(net, "ip_vs_app");
 }
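
register_ip_vs_app() now allocates the per-netns copy itself and returns either the new object or an error encoded in the pointer, which the FTP helper unpacks with IS_ERR()/PTR_ERR(). A user-space sketch of that pointer-encoded-error convention; the kernel's own helpers live in <linux/err.h>:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>
    #include <stdint.h>

    /* Errors are encoded as pointers in the topmost page of the address
     * space, mirroring ERR_PTR()/PTR_ERR()/IS_ERR(). */
    #define MAX_ERRNO 4095

    static void *err_ptr(long err)      { return (void *)err; }
    static long  ptr_err(const void *p) { return (long)p; }
    static int   is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    struct app { char name[16]; };

    static struct app *register_app(const char *name)
    {
        if (strlen(name) >= sizeof(((struct app *)0)->name))
            return err_ptr(-EINVAL);
        struct app *a = malloc(sizeof(*a));
        if (!a)
            return err_ptr(-ENOMEM);
        strcpy(a->name, name);
        return a;
    }

    int main(void)
    {
        struct app *a = register_app("ftp");
        if (is_err(a)) {
            printf("register failed: %ld\n", ptr_err(a));
            return 1;
        }
        printf("registered %s\n", a->name);
        free(a);
        return 0;
    }
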
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b54ecce..58918e2 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1303,7 +1303,8 @@
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
-	unsigned int offset, ihl, verdict;
+	unsigned int offset, offset2, ihl, verdict;
+	bool ipip;
 
 	*related = 1;
 
@@ -1345,6 +1346,21 @@
 
 	net = skb_net(skb);
 
+	/* Special case for errors for IPIP packets */
+	ipip = false;
+	if (cih->protocol == IPPROTO_IPIP) {
+		if (unlikely(cih->frag_off & htons(IP_OFFSET)))
+			return NF_ACCEPT;
+		/* Error for our IPIP must arrive at LOCAL_IN */
+		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
+			return NF_ACCEPT;
+		offset += cih->ihl * 4;
+		cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+		if (cih == NULL)
+			return NF_ACCEPT; /* The packet looks wrong, ignore */
+		ipip = true;
+	}
+
 	pd = ip_vs_proto_data_get(net, cih->protocol);
 	if (!pd)
 		return NF_ACCEPT;
@@ -1358,11 +1374,14 @@
 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
 		      "Checking incoming ICMP for");
 
+	offset2 = offset;
 	offset += cih->ihl * 4;
 
 	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
-	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+	/* The embedded headers contain source and dest in reverse order.
+	 * For IPIP this is an error for the request, not for the reply.
+	 */
+	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -1376,6 +1395,57 @@
 		goto out;
 	}
 
+	if (ipip) {
+		__be32 info = ic->un.gateway;
+
+		/* Update the MTU */
+		if (ic->type == ICMP_DEST_UNREACH &&
+		    ic->code == ICMP_FRAG_NEEDED) {
+			struct ip_vs_dest *dest = cp->dest;
+			u32 mtu = ntohs(ic->un.frag.mtu);
+
+			/* Strip outer IP and ICMP, go to IPIP header */
+			__skb_pull(skb, ihl + sizeof(_icmph));
+			offset2 -= ihl + sizeof(_icmph);
+			skb_reset_network_header(skb);
+			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+			rcu_read_lock();
+			ipv4_update_pmtu(skb, dev_net(skb->dev),
+					 mtu, 0, 0, 0, 0);
+			rcu_read_unlock();
+			/* Client uses PMTUD? */
+			if (!(cih->frag_off & htons(IP_DF)))
+				goto ignore_ipip;
+			/* Prefer the resulting PMTU */
+			if (dest) {
+				spin_lock(&dest->dst_lock);
+				if (dest->dst_cache)
+					mtu = dst_mtu(dest->dst_cache);
+				spin_unlock(&dest->dst_lock);
+			}
+			if (mtu > 68 + sizeof(struct iphdr))
+				mtu -= sizeof(struct iphdr);
+			info = htonl(mtu);
+		}
+		/* Strip outer IP, ICMP and IPIP, go to IP header of
+		 * original request.
+		 */
+		__skb_pull(skb, offset2);
+		skb_reset_network_header(skb);
+		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+			&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+			ic->type, ic->code, ntohl(info));
+		icmp_send(skb, ic->type, ic->code, info);
+		/* ICMP can be shorter, but account it anyway */
+		ip_vs_out_stats(cp, skb);
+
+ignore_ipip:
+		consume_skb(skb);
+		verdict = NF_STOLEN;
+		goto out;
+	}
+
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
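
For ICMP fragmentation-needed errors carried inside IPIP tunnel traffic, ip_vs_in_icmp() above re-sends the error to the real client and advertises the tunnel path MTU minus the outer IP header, so the client shrinks its packets enough to still fit once the IPIP encapsulation is added back. A hedged sketch of just that MTU arithmetic:

    #include <stdio.h>

    #define IPV4_HDR_LEN 20   /* sizeof(struct iphdr), no options */
    #define IPV4_MIN_MTU 68   /* minimum MTU every IPv4 host must accept */

    /* MTU to put in the relayed ICMP_FRAG_NEEDED: leave room for the outer
     * IPIP header, but never drop below the IPv4 minimum. */
    static unsigned int inner_mtu(unsigned int path_mtu)
    {
        if (path_mtu > IPV4_MIN_MTU + IPV4_HDR_LEN)
            path_mtu -= IPV4_HDR_LEN;
        return path_mtu;
    }

    int main(void)
    {
        printf("path MTU 1500 -> advertise %u\n", inner_mtu(1500)); /* 1480 */
        printf("path MTU 88   -> advertise %u\n", inner_mtu(88));   /* 88   */
        return 0;
    }
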
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 84444dd..3c60137 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1801,6 +1801,12 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "pmtu_disc",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IP_VS_DEBUG
 	{
 		.procname	= "debug_level",
@@ -2759,6 +2765,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
+		memset(&t, 0, sizeof(t));
 		__ip_vs_get_timeouts(net, &t);
 		if (copy_to_user(user, &t, sizeof(t)) != 0)
 			ret = -EFAULT;
@@ -3675,7 +3682,7 @@
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
 	int idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3726,6 +3733,8 @@
 	ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
 	tbl[idx++].data = &ipvs->sysctl_sync_retries;
 	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+	ipvs->sysctl_pmtu_disc = 1;
+	tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
 
 
 	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
@@ -3743,7 +3752,7 @@
 	return 0;
 }
 
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3754,8 +3763,8 @@
 
 #else
 
-int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
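
Besides registering the new pmtu_disc sysctl, the ip_vs_ctl.c hunk memsets struct ip_vs_timeout_user before filling it, so compiler-inserted padding never leaks kernel stack bytes through copy_to_user(). The same rule applies to any struct copied across a trust boundary; a small illustration with made-up field names:

    #include <stdio.h>
    #include <string.h>

    struct timeouts {
        char proto;        /* padding bytes typically follow this field */
        int  tcp_timeout;
        int  udp_timeout;
    };

    static void fill_timeouts(struct timeouts *t)
    {
        /* Zero the whole object first: assigning the named fields alone
         * would leave the padding with whatever was on the stack before. */
        memset(t, 0, sizeof(*t));
        t->proto = 6;
        t->tcp_timeout = 900;
        t->udp_timeout = 300;
    }

    int main(void)
    {
        struct timeouts t;

        fill_timeouts(&t);
        /* In the kernel this buffer would now go to copy_to_user(). */
        printf("tcp=%d udp=%d\n", t.tcp_timeout, t.udp_timeout);
        return 0;
    }
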
 
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index b20b29c..ad70b7e 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -441,16 +441,10 @@
 
 	if (!ipvs)
 		return -ENOENT;
-	app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
-	if (!app)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&app->a_list);
-	INIT_LIST_HEAD(&app->incs_list);
-	ipvs->ftp_app = app;
 
-	ret = register_ip_vs_app(net, app);
-	if (ret)
-		goto err_exit;
+	app = register_ip_vs_app(net, &ip_vs_ftp);
+	if (IS_ERR(app))
+		return PTR_ERR(app);
 
 	for (i = 0; i < ports_count; i++) {
 		if (!ports[i])
@@ -464,9 +458,7 @@
 	return 0;
 
 err_unreg:
-	unregister_ip_vs_app(net, app);
-err_exit:
-	kfree(ipvs->ftp_app);
+	unregister_ip_vs_app(net, &ip_vs_ftp);
 	return ret;
 }
 /*
@@ -474,10 +466,7 @@
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	unregister_ip_vs_app(net, ipvs->ftp_app);
-	kfree(ipvs->ftp_app);
+	unregister_ip_vs_app(net, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 65b616a..543a554 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -49,6 +49,7 @@
 	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect from remote daddr to
 				      * local
 				      */
+	IP_VS_RT_MODE_CONNECT	= 8, /* Always bind route to saddr */
 };
 
 /*
@@ -84,6 +85,42 @@
 	return dst;
 }
 
+/* Get route to daddr, update *saddr, optionally bind route to saddr */
+static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+				       u32 rtos, int rt_mode, __be32 *saddr)
+{
+	struct flowi4 fl4;
+	struct rtable *rt;
+	int loop = 0;
+
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.daddr = daddr;
+	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+	fl4.flowi4_tos = rtos;
+
+retry:
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt)) {
+		/* Invalid saddr ? */
+		if (PTR_ERR(rt) == -EINVAL && *saddr &&
+		    rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+			*saddr = 0;
+			flowi4_update_output(&fl4, 0, rtos, daddr, 0);
+			goto retry;
+		}
+		IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+		return NULL;
+	} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+		ip_rt_put(rt);
+		*saddr = fl4.saddr;
+		flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
+		loop++;
+		goto retry;
+	}
+	*saddr = fl4.saddr;
+	return rt;
+}
+
 /* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -98,20 +135,13 @@
 		spin_lock(&dest->dst_lock);
 		if (!(rt = (struct rtable *)
 		      __ip_vs_dst_check(dest, rtos))) {
-			struct flowi4 fl4;
-
-			memset(&fl4, 0, sizeof(fl4));
-			fl4.daddr = dest->addr.ip;
-			fl4.flowi4_tos = rtos;
-			rt = ip_route_output_key(net, &fl4);
-			if (IS_ERR(rt)) {
+			rt = do_output_route4(net, dest->addr.ip, rtos,
+					      rt_mode, &dest->dst_saddr.ip);
+			if (!rt) {
 				spin_unlock(&dest->dst_lock);
-				IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-					     &dest->addr.ip);
 				return NULL;
 			}
 			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
-			dest->dst_saddr.ip = fl4.saddr;
 			IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
 				  "rtos=%X\n",
 				  &dest->addr.ip, &dest->dst_saddr.ip,
@@ -122,19 +152,17 @@
 			*ret_saddr = dest->dst_saddr.ip;
 		spin_unlock(&dest->dst_lock);
 	} else {
-		struct flowi4 fl4;
+		__be32 saddr = htonl(INADDR_ANY);
 
-		memset(&fl4, 0, sizeof(fl4));
-		fl4.daddr = daddr;
-		fl4.flowi4_tos = rtos;
-		rt = ip_route_output_key(net, &fl4);
-		if (IS_ERR(rt)) {
-			IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-				     &daddr);
+		/* For such unconfigured boxes avoid many route lookups
+		 * for performance reasons because we do not remember saddr
+		 */
+		rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+		rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
+		if (!rt)
 			return NULL;
-		}
 		if (ret_saddr)
-			*ret_saddr = fl4.saddr;
+			*ret_saddr = saddr;
 	}
 
 	local = rt->rt_flags & RTCF_LOCAL;
@@ -331,6 +359,7 @@
 	old_dst = dest->dst_cache;
 	dest->dst_cache = NULL;
 	dst_release(old_dst);
+	dest->dst_saddr.ip = 0;
 }
 
 #define IP_VS_XMIT_TUNNEL(skb, cp)				\
@@ -766,12 +795,13 @@
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		  struct ip_vs_protocol *pp)
 {
+	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
 	struct rtable *rt;			/* Route to the other host */
 	__be32 saddr;				/* Source for tunnel */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *old_iph = ip_hdr(skb);
 	u8     tos = old_iph->tos;
-	__be16 df = old_iph->frag_off;
+	__be16 df;
 	struct iphdr  *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
 	int    mtu;
@@ -781,7 +811,8 @@
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
 				      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
-						   IP_VS_RT_MODE_NON_LOCAL,
+						   IP_VS_RT_MODE_NON_LOCAL |
+						   IP_VS_RT_MODE_CONNECT,
 						   &saddr)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
@@ -796,13 +827,13 @@
 		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
 		goto tx_error_put;
 	}
-	if (skb_dst(skb))
+	if (rt_is_output_route(skb_rtable(skb)))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-	df |= (old_iph->frag_off & htons(IP_DF));
+	/* Copy DF, reset fragment offset and MF */
+	df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
 
-	if ((old_iph->frag_off & htons(IP_DF) &&
-	    mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
+	if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
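
In ip_vs_tunnel_xmit() the DF bit of the outer tunnel header is now either copied from the inner packet or forced to zero, depending on the new pmtu_disc sysctl, and the frag-needed ICMP is only generated when DF actually survived. A user-space sketch of that bit handling, keeping network byte order as the kernel code does:

    #include <stdio.h>
    #include <arpa/inet.h>

    #ifndef IP_DF
    #define IP_DF 0x4000   /* "don't fragment" flag in iphdr->frag_off */
    #endif

    /* Outer header frag_off: copy DF from the inner packet when path-MTU
     * discovery is enabled for the tunnel, otherwise clear it (which also
     * drops the inner fragment offset and MF bit). */
    static unsigned short outer_frag_off(unsigned short inner_frag_off,
                                         int pmtu_disc)
    {
        return pmtu_disc ? (unsigned short)(inner_frag_off & htons(IP_DF)) : 0;
    }

    int main(void)
    {
        unsigned short df_set = htons(IP_DF | 0x00ff);  /* DF + some offset */

        printf("pmtu_disc=1 -> 0x%04x\n", ntohs(outer_frag_off(df_set, 1)));
        printf("pmtu_disc=0 -> 0x%04x\n", ntohs(outer_frag_off(df_set, 0)));
        return 0;
    }
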
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 45cf602..527651a 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -361,23 +361,6 @@
 	}
 }
 
-static inline int refresh_timer(struct nf_conntrack_expect *i)
-{
-	struct nf_conn_help *master_help = nfct_help(i->master);
-	const struct nf_conntrack_expect_policy *p;
-
-	if (!del_timer(&i->timeout))
-		return 0;
-
-	p = &rcu_dereference_protected(
-		master_help->helper,
-		lockdep_is_held(&nf_conntrack_lock)
-		)->expect_policy[i->class];
-	i->timeout.expires = jiffies + p->timeout * HZ;
-	add_timer(&i->timeout);
-	return 1;
-}
-
 static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 {
 	const struct nf_conntrack_expect_policy *p;
@@ -386,7 +369,7 @@
 	struct nf_conn_help *master_help = nfct_help(master);
 	struct nf_conntrack_helper *helper;
 	struct net *net = nf_ct_exp_net(expect);
-	struct hlist_node *n;
+	struct hlist_node *n, *next;
 	unsigned int h;
 	int ret = 1;
 
@@ -395,12 +378,12 @@
 		goto out;
 	}
 	h = nf_ct_expect_dst_hash(&expect->tuple);
-	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+	hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
 		if (expect_matches(i, expect)) {
-			/* Refresh timer: if it's dying, ignore.. */
-			if (refresh_timer(i)) {
-				ret = 0;
-				goto out;
+			if (del_timer(&i->timeout)) {
+				nf_ct_unlink_expect(i);
+				nf_ct_expect_put(i);
+				break;
 			}
 		} else if (expect_clash(i, expect)) {
 			ret = -EBUSY;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 14f67a2..da4fc37 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1896,10 +1896,15 @@
 ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
 {
 	struct nlattr *cda[CTA_MAX+1];
+	int ret;
 
 	nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
 
-	return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+	spin_lock_bh(&nf_conntrack_lock);
+	ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	return ret;
 }
 
 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0dc6385..51e928d 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -21,7 +21,6 @@
 #include <linux/notifier.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -294,9 +293,7 @@
 	nf_ct_l3proto_unregister_sysctl(net, proto);
 
 	/* Remove all contrack entries for this protocol */
-	rtnl_lock();
 	nf_ct_iterate_cleanup(net, kill_l3proto, proto);
-	rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
 
@@ -502,9 +499,7 @@
 	nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
 	/* Remove all contrack entries for this protocol */
-	rtnl_lock();
 	nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
-	rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
 
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 758a1ba..5c0a112 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -183,12 +183,12 @@
 	return len + digits_len(ct, dptr, limit, shift);
 }
 
-static int parse_addr(const struct nf_conn *ct, const char *cp,
-                      const char **endp, union nf_inet_addr *addr,
-                      const char *limit)
+static int sip_parse_addr(const struct nf_conn *ct, const char *cp,
+			  const char **endp, union nf_inet_addr *addr,
+			  const char *limit, bool delim)
 {
 	const char *end;
-	int ret = 0;
+	int ret;
 
 	if (!ct)
 		return 0;
@@ -197,16 +197,28 @@
 	switch (nf_ct_l3num(ct)) {
 	case AF_INET:
 		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
+		if (ret == 0)
+			return 0;
 		break;
 	case AF_INET6:
+		if (cp < limit && *cp == '[')
+			cp++;
+		else if (delim)
+			return 0;
+
 		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
+		if (ret == 0)
+			return 0;
+
+		if (end < limit && *end == ']')
+			end++;
+		else if (delim)
+			return 0;
 		break;
 	default:
 		BUG();
 	}
 
-	if (ret == 0 || end == cp)
-		return 0;
 	if (endp)
 		*endp = end;
 	return 1;
@@ -219,7 +231,7 @@
 	union nf_inet_addr addr;
 	const char *aux = dptr;
 
-	if (!parse_addr(ct, dptr, &dptr, &addr, limit)) {
+	if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) {
 		pr_debug("ip: %s parse failed.!\n", dptr);
 		return 0;
 	}
@@ -296,7 +308,7 @@
 		return 0;
 	dptr += shift;
 
-	if (!parse_addr(ct, dptr, &end, addr, limit))
+	if (!sip_parse_addr(ct, dptr, &end, addr, limit, true))
 		return -1;
 	if (end < limit && *end == ':') {
 		end++;
@@ -550,7 +562,7 @@
 	if (ret == 0)
 		return ret;
 
-	if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit))
+	if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true))
 		return -1;
 	if (*c == ':') {
 		c++;
@@ -599,7 +611,7 @@
 			       unsigned int dataoff, unsigned int datalen,
 			       const char *name,
 			       unsigned int *matchoff, unsigned int *matchlen,
-			       union nf_inet_addr *addr)
+			       union nf_inet_addr *addr, bool delim)
 {
 	const char *limit = dptr + datalen;
 	const char *start, *end;
@@ -613,7 +625,7 @@
 		return 0;
 
 	start += strlen(name);
-	if (!parse_addr(ct, start, &end, addr, limit))
+	if (!sip_parse_addr(ct, start, &end, addr, limit, delim))
 		return 0;
 	*matchoff = start - dptr;
 	*matchlen = end - start;
@@ -675,6 +687,47 @@
 	return 1;
 }
 
+static int sdp_parse_addr(const struct nf_conn *ct, const char *cp,
+			  const char **endp, union nf_inet_addr *addr,
+			  const char *limit)
+{
+	const char *end;
+	int ret;
+
+	memset(addr, 0, sizeof(*addr));
+	switch (nf_ct_l3num(ct)) {
+	case AF_INET:
+		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
+		break;
+	case AF_INET6:
+		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
+		break;
+	default:
+		BUG();
+	}
+
+	if (ret == 0)
+		return 0;
+	if (endp)
+		*endp = end;
+	return 1;
+}
+
+/* skip ip address. returns its length. */
+static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
+			const char *limit, int *shift)
+{
+	union nf_inet_addr addr;
+	const char *aux = dptr;
+
+	if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) {
+		pr_debug("ip: %s parse failed.!\n", dptr);
+		return 0;
+	}
+
+	return dptr - aux;
+}
+
 /* SDP header parsing: a SDP session description contains an ordered set of
  * headers, starting with a section containing general session parameters,
  * optionally followed by multiple media descriptions.
@@ -686,10 +739,10 @@
  */
 static const struct sip_header ct_sdp_hdrs[] = {
 	[SDP_HDR_VERSION]		= SDP_HDR("v=", NULL, digits_len),
-	[SDP_HDR_OWNER_IP4]		= SDP_HDR("o=", "IN IP4 ", epaddr_len),
-	[SDP_HDR_CONNECTION_IP4]	= SDP_HDR("c=", "IN IP4 ", epaddr_len),
-	[SDP_HDR_OWNER_IP6]		= SDP_HDR("o=", "IN IP6 ", epaddr_len),
-	[SDP_HDR_CONNECTION_IP6]	= SDP_HDR("c=", "IN IP6 ", epaddr_len),
+	[SDP_HDR_OWNER_IP4]		= SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
+	[SDP_HDR_CONNECTION_IP4]	= SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
+	[SDP_HDR_OWNER_IP6]		= SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
+	[SDP_HDR_CONNECTION_IP6]	= SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
 	[SDP_HDR_MEDIA]			= SDP_HDR("m=", NULL, media_len),
 };
 
@@ -775,8 +828,8 @@
 	if (ret <= 0)
 		return ret;
 
-	if (!parse_addr(ct, dptr + *matchoff, NULL, addr,
-			dptr + *matchoff + *matchlen))
+	if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr,
+			    dptr + *matchoff + *matchlen))
 		return -1;
 	return 1;
 }
@@ -1515,7 +1568,6 @@
 }
 
 static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
-static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
 
 static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
 	[SIP_EXPECT_SIGNALLING] = {
@@ -1585,9 +1637,9 @@
 			sip[i][j].me = THIS_MODULE;
 
 			if (ports[i] == SIP_PORT)
-				sprintf(sip_names[i][j], "sip");
+				sprintf(sip[i][j].name, "sip");
 			else
-				sprintf(sip_names[i][j], "sip-%u", i);
+				sprintf(sip[i][j].name, "sip-%u", i);
 
 			pr_debug("port #%u: %u\n", i, ports[i]);
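
sip_parse_addr() above now accepts the bracketed IPv6 literal form used in SIP URIs ("[2001:db8::1]:5060"), while SDP bodies keep the bare form handled by the new sdp_parse_addr(). A user-space sketch of the bracket handling around inet_pton(); error handling is trimmed, and the in-kernel parser additionally enforces the brackets via its 'delim' flag and works on non-terminated buffers through in4_pton()/in6_pton():

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Parse "addr" or "[addr]" at the start of 'cp', store the binary
     * address in 'buf' and return a pointer just past it (NULL on failure). */
    static const char *parse_sip_host(const char *cp, int af, void *buf)
    {
        char tmp[INET6_ADDRSTRLEN];
        int bracketed = (af == AF_INET6 && *cp == '[');
        const char *start = cp + bracketed;
        const char *end = start + strcspn(start, bracketed ? "]" : ":; \r\n");
        size_t len = (size_t)(end - start);

        if (len == 0 || len >= sizeof(tmp))
            return NULL;
        memcpy(tmp, start, len);
        tmp[len] = '\0';
        if (inet_pton(af, tmp, buf) != 1)
            return NULL;
        if (bracketed) {
            if (*end != ']')
                return NULL;
            end++;
        }
        return end;
    }

    int main(void)
    {
        unsigned char v6[16];
        const char *rest = parse_sip_host("[2001:db8::1]:5060", AF_INET6, v6);

        printf("parsed %s, remainder \"%s\"\n", rest ? "ok" : "failed",
               rest ? rest : "");
        return 0;
    }
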
 
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index b2e7310..d7ec928 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -79,11 +79,11 @@
 
 	if (tb[NFACCT_BYTES]) {
 		atomic64_set(&nfacct->bytes,
-			     be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+			     be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
 	}
 	if (tb[NFACCT_PKTS]) {
 		atomic64_set(&nfacct->pkts,
-			     be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
 	}
 	atomic_set(&nfacct->refcnt, 1);
 	list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index d683619..32a1ba3 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -74,7 +74,7 @@
 	if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
 		return -EINVAL;
 
-	tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+	tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
 	tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
 	return 0;
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 7babe7d..817f9e9 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -43,7 +43,7 @@
 	const struct iphdr *iph = ip_hdr(skb);
 
 	/* packets in either direction go into same queue */
-	if (iph->saddr < iph->daddr)
+	if ((__force u32)iph->saddr < (__force u32)iph->daddr)
 		return jhash_3words((__force u32)iph->saddr,
 			(__force u32)iph->daddr, iph->protocol, jhash_initval);
 
@@ -57,7 +57,8 @@
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	u32 a, b, c;
 
-	if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+	if ((__force u32)ip6h->saddr.s6_addr32[3] <
+	    (__force u32)ip6h->daddr.s6_addr32[3]) {
 		a = (__force u32) ip6h->saddr.s6_addr32[3];
 		b = (__force u32) ip6h->daddr.s6_addr32[3];
 	} else {
@@ -65,7 +66,8 @@
 		a = (__force u32) ip6h->daddr.s6_addr32[3];
 	}
 
-	if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+	if ((__force u32)ip6h->saddr.s6_addr32[1] <
+	    (__force u32)ip6h->daddr.s6_addr32[1])
 		c = (__force u32) ip6h->saddr.s6_addr32[1];
 	else
 		c = (__force u32) ip6h->daddr.s6_addr32[1];
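
The xt_NFQUEUE change only adds __force casts so sparse accepts comparing __be32 addresses as plain integers; the underlying idea remains that hashing the (smaller, larger) ordering of source and destination makes both directions of a flow land in the same queue. A sketch of that symmetric hashing with a toy mixer standing in for the kernel's jhash; the reciprocal-scaling step mirrors how the target spreads the hash over queues_total:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Toy 3-word mixer in place of jhash_3words(); only the symmetric
     * argument ordering matters for this illustration. */
    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = 0x9e3779b9u;

        h ^= a; h *= 0x85ebca6bu;
        h ^= b; h *= 0xc2b2ae35u;
        h ^= c; h ^= h >> 16;
        return h;
    }

    /* Order saddr/daddr before mixing so A->B and B->A hash identically. */
    static uint32_t flow_queue(uint32_t saddr, uint32_t daddr, uint8_t proto,
                               uint32_t nqueues)
    {
        uint32_t lo = saddr < daddr ? saddr : daddr;
        uint32_t hi = saddr < daddr ? daddr : saddr;

        return (uint32_t)(((uint64_t)mix3(lo, hi, proto) * nqueues) >> 32);
    }

    int main(void)
    {
        uint32_t a = ntohl(inet_addr("192.0.2.1"));
        uint32_t b = ntohl(inet_addr("198.51.100.7"));

        printf("a->b queue %u, b->a queue %u\n",
               flow_queue(a, b, 6, 8), flow_queue(b, a, 6, 8));
        return 0;
    }
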
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 846f895..a5e673d 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -269,7 +269,7 @@
 						mss <<= 8;
 						mss |= optp[2];
 
-						mss = ntohs(mss);
+						mss = ntohs((__force __be16)mss);
 						break;
 					case OSFOPT_TS:
 						loop_cont = 1;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7cb7867..aacfb1d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1364,7 +1364,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &scm;
 
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, true);
 	if (err < 0)
 		return err;
 
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 0060e3b..cc55b35 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,3 +14,11 @@
 	  be called af_packet.
 
 	  If unsure, say Y.
+
+config PACKET_DIAG
+	tristate "Packet: sockets monitoring interface"
+	depends on PACKET
+	default n
+	---help---
+	  Support for the PF_PACKET socket monitoring interface used by the ss tool.
+	  If unsure, say Y.
diff --git a/net/packet/Makefile b/net/packet/Makefile
index 81183ea..9df6134 100644
--- a/net/packet/Makefile
+++ b/net/packet/Makefile
@@ -3,3 +3,5 @@
 #
 
 obj-$(CONFIG_PACKET) += af_packet.o
+obj-$(CONFIG_PACKET_DIAG) += af_packet_diag.o
+af_packet_diag-y += diag.o
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d147317..5dafe84 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,6 +93,8 @@
 #include <net/inet_common.h>
 #endif
 
+#include "internal.h"
+
 /*
    Assumptions:
    - if device has no dev->hard_header routine, it adds and removes ll header
@@ -146,14 +148,6 @@
 
 /* Private packet socket structures. */
 
-struct packet_mclist {
-	struct packet_mclist	*next;
-	int			ifindex;
-	int			count;
-	unsigned short		type;
-	unsigned short		alen;
-	unsigned char		addr[MAX_ADDR_LEN];
-};
 /* identical to struct packet_mreq except it has
  * a longer address field.
  */
@@ -175,63 +169,7 @@
 #define BLK_PLUS_PRIV(sz_of_priv) \
 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 
-/* kbdq - kernel block descriptor queue */
-struct tpacket_kbdq_core {
-	struct pgv	*pkbdq;
-	unsigned int	feature_req_word;
-	unsigned int	hdrlen;
-	unsigned char	reset_pending_on_curr_blk;
-	unsigned char   delete_blk_timer;
-	unsigned short	kactive_blk_num;
-	unsigned short	blk_sizeof_priv;
-
-	/* last_kactive_blk_num:
-	 * trick to see if user-space has caught up
-	 * in order to avoid refreshing timer when every single pkt arrives.
-	 */
-	unsigned short	last_kactive_blk_num;
-
-	char		*pkblk_start;
-	char		*pkblk_end;
-	int		kblk_size;
-	unsigned int	knum_blocks;
-	uint64_t	knxt_seq_num;
-	char		*prev;
-	char		*nxt_offset;
-	struct sk_buff	*skb;
-
-	atomic_t	blk_fill_in_prog;
-
-	/* Default is set to 8ms */
-#define DEFAULT_PRB_RETIRE_TOV	(8)
-
-	unsigned short  retire_blk_tov;
-	unsigned short  version;
-	unsigned long	tov_in_jiffies;
-
-	/* timer to retire an outstanding block */
-	struct timer_list retire_blk_timer;
-};
-
 #define PGV_FROM_VMALLOC 1
-struct pgv {
-	char *buffer;
-};
-
-struct packet_ring_buffer {
-	struct pgv		*pg_vec;
-	unsigned int		head;
-	unsigned int		frames_per_block;
-	unsigned int		frame_size;
-	unsigned int		frame_max;
-
-	unsigned int		pg_vec_order;
-	unsigned int		pg_vec_pages;
-	unsigned int		pg_vec_len;
-
-	struct tpacket_kbdq_core	prb_bdqc;
-	atomic_t		pending;
-};
 
 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
@@ -269,52 +207,6 @@
 		struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
 
-struct packet_fanout;
-struct packet_sock {
-	/* struct sock has to be the first member of packet_sock */
-	struct sock		sk;
-	struct packet_fanout	*fanout;
-	struct tpacket_stats	stats;
-	union  tpacket_stats_u	stats_u;
-	struct packet_ring_buffer	rx_ring;
-	struct packet_ring_buffer	tx_ring;
-	int			copy_thresh;
-	spinlock_t		bind_lock;
-	struct mutex		pg_vec_lock;
-	unsigned int		running:1,	/* prot_hook is attached*/
-				auxdata:1,
-				origdev:1,
-				has_vnet_hdr:1;
-	int			ifindex;	/* bound device		*/
-	__be16			num;
-	struct packet_mclist	*mclist;
-	atomic_t		mapped;
-	enum tpacket_versions	tp_version;
-	unsigned int		tp_hdrlen;
-	unsigned int		tp_reserve;
-	unsigned int		tp_loss:1;
-	unsigned int		tp_tstamp;
-	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
-};
-
-#define PACKET_FANOUT_MAX	256
-
-struct packet_fanout {
-#ifdef CONFIG_NET_NS
-	struct net		*net;
-#endif
-	unsigned int		num_members;
-	u16			id;
-	u8			type;
-	u8			defrag;
-	atomic_t		rr_cur;
-	struct list_head	list;
-	struct sock		*arr[PACKET_FANOUT_MAX];
-	spinlock_t		lock;
-	atomic_t		sk_ref;
-	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
-};
-
 struct packet_skb_cb {
 	unsigned int origlen;
 	union {
@@ -334,11 +226,6 @@
 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 	((x)->kactive_blk_num+1) : 0)
 
-static struct packet_sock *pkt_sk(struct sock *sk)
-{
-	return (struct packet_sock *)sk;
-}
-
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
@@ -968,7 +855,8 @@
 		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
 		ppd->tp_status = TP_STATUS_VLAN_VALID;
 	} else {
-		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+		ppd->hv1.tp_vlan_tci = 0;
+		ppd->tp_status = TP_STATUS_AVAILABLE;
 	}
 }
 
@@ -1079,7 +967,7 @@
 	default:
 		WARN(1, "TPACKET version not supported\n");
 		BUG();
-		return 0;
+		return NULL;
 	}
 }
 
@@ -1243,7 +1131,8 @@
 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
-static DEFINE_MUTEX(fanout_mutex);
+DEFINE_MUTEX(fanout_mutex);
+EXPORT_SYMBOL_GPL(fanout_mutex);
 static LIST_HEAD(fanout_list);
 
 static void __fanout_link(struct sock *sk, struct packet_sock *po)
@@ -1273,6 +1162,14 @@
 	spin_unlock(&f->lock);
 }
 
+bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+{
+	if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
+		return true;
+
+	return false;
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
 	struct packet_sock *po = pkt_sk(sk);
@@ -1325,6 +1222,7 @@
 		match->prot_hook.dev = po->prot_hook.dev;
 		match->prot_hook.func = packet_rcv_fanout;
 		match->prot_hook.af_packet_priv = match;
+		match->prot_hook.id_match = match_fanout_group;
 		dev_add_pack(&match->prot_hook);
 		list_add(&match->list, &fanout_list);
 	}
@@ -1355,9 +1253,9 @@
 	if (!f)
 		return;
 
+	mutex_lock(&fanout_mutex);
 	po->fanout = NULL;
 
-	mutex_lock(&fanout_mutex);
 	if (atomic_dec_and_test(&f->sk_ref)) {
 		list_del(&f->list);
 		dev_remove_pack(&f->prot_hook);
@@ -1936,7 +1834,6 @@
 
 	if (likely(po->tx_ring.pg_vec)) {
 		ph = skb_shinfo(skb)->destructor_arg;
-		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
 		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
 		atomic_dec(&po->tx_ring.pending);
 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
@@ -2055,7 +1952,7 @@
 	int tp_len, size_max;
 	unsigned char *addr;
 	int len_sum = 0;
-	int status = 0;
+	int status = TP_STATUS_AVAILABLE;
 	int hlen, tlen;
 
 	mutex_lock(&po->pg_vec_lock);
@@ -2420,10 +2317,13 @@
 	net = sock_net(sk);
 	po = pkt_sk(sk);
 
-	spin_lock_bh(&net->packet.sklist_lock);
+	mutex_lock(&net->packet.sklist_lock);
 	sk_del_node_init_rcu(sk);
+	mutex_unlock(&net->packet.sklist_lock);
+
+	preempt_disable();
 	sock_prot_inuse_add(net, sk->sk_prot, -1);
-	spin_unlock_bh(&net->packet.sklist_lock);
+	preempt_enable();
 
 	spin_lock(&po->bind_lock);
 	unregister_prot_hook(sk, false);
@@ -2622,10 +2522,13 @@
 		register_prot_hook(sk);
 	}
 
-	spin_lock_bh(&net->packet.sklist_lock);
+	mutex_lock(&net->packet.sklist_lock);
 	sk_add_node_rcu(sk, &net->packet.sklist);
+	mutex_unlock(&net->packet.sklist_lock);
+
+	preempt_disable();
 	sock_prot_inuse_add(net, &packet_proto, 1);
-	spin_unlock_bh(&net->packet.sklist_lock);
+	preempt_enable();
 
 	return 0;
 out:
@@ -3878,7 +3781,7 @@
 
 static int __net_init packet_net_init(struct net *net)
 {
-	spin_lock_init(&net->packet.sklist_lock);
+	mutex_init(&net->packet.sklist_lock);
 	INIT_HLIST_HEAD(&net->packet.sklist);
 
 	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
diff --git a/net/packet/diag.c b/net/packet/diag.c
new file mode 100644
index 0000000..39bce0d
--- /dev/null
+++ b/net/packet/diag.c
@@ -0,0 +1,242 @@
+#include <linux/module.h>
+#include <linux/sock_diag.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/packet_diag.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "internal.h"
+
+static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+	struct packet_diag_info pinfo;
+
+	pinfo.pdi_index = po->ifindex;
+	pinfo.pdi_version = po->tp_version;
+	pinfo.pdi_reserve = po->tp_reserve;
+	pinfo.pdi_copy_thresh = po->copy_thresh;
+	pinfo.pdi_tstamp = po->tp_tstamp;
+
+	pinfo.pdi_flags = 0;
+	if (po->running)
+		pinfo.pdi_flags |= PDI_RUNNING;
+	if (po->auxdata)
+		pinfo.pdi_flags |= PDI_AUXDATA;
+	if (po->origdev)
+		pinfo.pdi_flags |= PDI_ORIGDEV;
+	if (po->has_vnet_hdr)
+		pinfo.pdi_flags |= PDI_VNETHDR;
+	if (po->tp_loss)
+		pinfo.pdi_flags |= PDI_LOSS;
+
+	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
+}
+
+static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+	struct nlattr *mca;
+	struct packet_mclist *ml;
+
+	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
+	if (!mca)
+		return -EMSGSIZE;
+
+	rtnl_lock();
+	for (ml = po->mclist; ml; ml = ml->next) {
+		struct packet_diag_mclist *dml;
+
+		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
+		if (!dml) {
+			rtnl_unlock();
+			nla_nest_cancel(nlskb, mca);
+			return -EMSGSIZE;
+		}
+
+		dml->pdmc_index = ml->ifindex;
+		dml->pdmc_type = ml->type;
+		dml->pdmc_alen = ml->alen;
+		dml->pdmc_count = ml->count;
+		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
+		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
+	}
+
+	rtnl_unlock();
+	nla_nest_end(nlskb, mca);
+
+	return 0;
+}
+
+static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
+		struct sk_buff *nlskb)
+{
+	struct packet_diag_ring pdr;
+
+	if (!ring->pg_vec || ((ver > TPACKET_V2) &&
+				(nl_type == PACKET_DIAG_TX_RING)))
+		return 0;
+
+	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
+	pdr.pdr_block_nr = ring->pg_vec_len;
+	pdr.pdr_frame_size = ring->frame_size;
+	pdr.pdr_frame_nr = ring->frame_max + 1;
+
+	if (ver > TPACKET_V2) {
+		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
+		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
+	} else {
+		pdr.pdr_retire_tmo = 0;
+		pdr.pdr_sizeof_priv = 0;
+		pdr.pdr_features = 0;
+	}
+
+	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
+}
+
+static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
+{
+	int ret;
+
+	mutex_lock(&po->pg_vec_lock);
+	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
+			PACKET_DIAG_RX_RING, skb);
+	if (!ret)
+		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
+				PACKET_DIAG_TX_RING, skb);
+	mutex_unlock(&po->pg_vec_lock);
+
+	return ret;
+}
+
+static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
+{
+	int ret = 0;
+
+	mutex_lock(&fanout_mutex);
+	if (po->fanout) {
+		u32 val;
+
+		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
+		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
+	}
+	mutex_unlock(&fanout_mutex);
+
+	return ret;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct packet_diag_req *req,
+		u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+	struct nlmsghdr *nlh;
+	struct packet_diag_msg *rp;
+	struct packet_sock *po = pkt_sk(sk);
+
+	nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	rp = nlmsg_data(nlh);
+	rp->pdiag_family = AF_PACKET;
+	rp->pdiag_type = sk->sk_type;
+	rp->pdiag_num = ntohs(po->num);
+	rp->pdiag_ino = sk_ino;
+	sock_diag_save_cookie(sk, rp->pdiag_cookie);
+
+	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
+			pdiag_put_info(po, skb))
+		goto out_nlmsg_trim;
+
+	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
+			pdiag_put_mclist(po, skb))
+		goto out_nlmsg_trim;
+
+	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
+			pdiag_put_rings_cfg(po, skb))
+		goto out_nlmsg_trim;
+
+	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
+			pdiag_put_fanout(po, skb))
+		goto out_nlmsg_trim;
+
+	return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int num = 0, s_num = cb->args[0];
+	struct packet_diag_req *req;
+	struct net *net;
+	struct sock *sk;
+	struct hlist_node *node;
+
+	net = sock_net(skb->sk);
+	req = nlmsg_data(cb->nlh);
+
+	mutex_lock(&net->packet.sklist_lock);
+	sk_for_each(sk, node, &net->packet.sklist) {
+		if (!net_eq(sock_net(sk), net))
+			continue;
+		if (num < s_num)
+			goto next;
+
+		if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).pid,
+					cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					sock_i_ino(sk)) < 0)
+			goto done;
+next:
+		num++;
+	}
+done:
+	mutex_unlock(&net->packet.sklist_lock);
+	cb->args[0] = num;
+
+	return skb->len;
+}
+
+static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+	int hdrlen = sizeof(struct packet_diag_req);
+	struct net *net = sock_net(skb->sk);
+	struct packet_diag_req *req;
+
+	if (nlmsg_len(h) < hdrlen)
+		return -EINVAL;
+
+	req = nlmsg_data(h);
+	/* Make it possible to support protocol filtering later */
+	if (req->sdiag_protocol)
+		return -EINVAL;
+
+	if (h->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = packet_diag_dump,
+		};
+		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+	} else
+		return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler packet_diag_handler = {
+	.family = AF_PACKET,
+	.dump = packet_diag_handler_dump,
+};
+
+static int __init packet_diag_init(void)
+{
+	return sock_diag_register(&packet_diag_handler);
+}
+
+static void __exit packet_diag_exit(void)
+{
+	sock_diag_unregister(&packet_diag_handler);
+}
+
+module_init(packet_diag_init);
+module_exit(packet_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
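
For orientation, here is a minimal user-space sketch of how this dump interface can be queried over NETLINK_SOCK_DIAG. The packet_diag_req layout below is an assumption that mirrors the fields referenced in diag.c (sdiag_family, sdiag_protocol, pdiag_show); the authoritative definition lives in the UAPI header that accompanies this code and is not shown in this hunk.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>

/* Assumed layout, mirroring the fields used by packet_diag_handler_dump(). */
struct packet_diag_req_sketch {
	__u8	sdiag_family;
	__u8	sdiag_protocol;
	__u16	pad;
	__u32	pdiag_ino;
	__u32	pdiag_show;
	__u32	pdiag_cookie[2];
};

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct packet_diag_req_sketch req;
	} msg;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len   = sizeof(msg);
	msg.nlh.nlmsg_type  = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_PACKET;	/* sdiag_protocol stays 0 */
	msg.req.pdiag_show   = ~0U;		/* request every PACKET_SHOW_* group */

	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		perror("send");
		return 1;
	}

	/* Replies come back as NLM_F_MULTI packet_diag_msg records, each with
	 * nested PACKET_DIAG_* attributes (for example the fanout id in the
	 * low 16 bits of PACKET_DIAG_FANOUT), terminated by NLMSG_DONE;
	 * parsing is omitted for brevity. */
	close(fd);
	return 0;
}
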
diff --git a/net/packet/internal.h b/net/packet/internal.h
new file mode 100644
index 0000000..44945f6
--- /dev/null
+++ b/net/packet/internal.h
@@ -0,0 +1,121 @@
+#ifndef __PACKET_INTERNAL_H__
+#define __PACKET_INTERNAL_H__
+
+struct packet_mclist {
+	struct packet_mclist	*next;
+	int			ifindex;
+	int			count;
+	unsigned short		type;
+	unsigned short		alen;
+	unsigned char		addr[MAX_ADDR_LEN];
+};
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+	struct pgv	*pkbdq;
+	unsigned int	feature_req_word;
+	unsigned int	hdrlen;
+	unsigned char	reset_pending_on_curr_blk;
+	unsigned char   delete_blk_timer;
+	unsigned short	kactive_blk_num;
+	unsigned short	blk_sizeof_priv;
+
+	/* last_kactive_blk_num:
+	 * trick to see if user-space has caught up
+	 * in order to avoid refreshing the timer when every single packet arrives.
+	 */
+	unsigned short	last_kactive_blk_num;
+
+	char		*pkblk_start;
+	char		*pkblk_end;
+	int		kblk_size;
+	unsigned int	knum_blocks;
+	uint64_t	knxt_seq_num;
+	char		*prev;
+	char		*nxt_offset;
+	struct sk_buff	*skb;
+
+	atomic_t	blk_fill_in_prog;
+
+	/* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV	(8)
+
+	unsigned short  retire_blk_tov;
+	unsigned short  version;
+	unsigned long	tov_in_jiffies;
+
+	/* timer to retire an outstanding block */
+	struct timer_list retire_blk_timer;
+};
+
+struct pgv {
+	char *buffer;
+};
+
+struct packet_ring_buffer {
+	struct pgv		*pg_vec;
+	unsigned int		head;
+	unsigned int		frames_per_block;
+	unsigned int		frame_size;
+	unsigned int		frame_max;
+
+	unsigned int		pg_vec_order;
+	unsigned int		pg_vec_pages;
+	unsigned int		pg_vec_len;
+
+	struct tpacket_kbdq_core	prb_bdqc;
+	atomic_t		pending;
+};
+
+extern struct mutex fanout_mutex;
+#define PACKET_FANOUT_MAX	256
+
+struct packet_fanout {
+#ifdef CONFIG_NET_NS
+	struct net		*net;
+#endif
+	unsigned int		num_members;
+	u16			id;
+	u8			type;
+	u8			defrag;
+	atomic_t		rr_cur;
+	struct list_head	list;
+	struct sock		*arr[PACKET_FANOUT_MAX];
+	spinlock_t		lock;
+	atomic_t		sk_ref;
+	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+};
+
+struct packet_sock {
+	/* struct sock has to be the first member of packet_sock */
+	struct sock		sk;
+	struct packet_fanout	*fanout;
+	struct tpacket_stats	stats;
+	union  tpacket_stats_u	stats_u;
+	struct packet_ring_buffer	rx_ring;
+	struct packet_ring_buffer	tx_ring;
+	int			copy_thresh;
+	spinlock_t		bind_lock;
+	struct mutex		pg_vec_lock;
+	unsigned int		running:1,	/* prot_hook is attached */
+				auxdata:1,
+				origdev:1,
+				has_vnet_hdr:1;
+	int			ifindex;	/* bound device		*/
+	__be16			num;
+	struct packet_mclist	*mclist;
+	atomic_t		mapped;
+	enum tpacket_versions	tp_version;
+	unsigned int		tp_hdrlen;
+	unsigned int		tp_reserve;
+	unsigned int		tp_loss:1;
+	unsigned int		tp_tstamp;
+	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+};
+
+static struct packet_sock *pkt_sk(struct sock *sk)
+{
+	return (struct packet_sock *)sk;
+}
+
+#endif
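
The pkt_sk() helper above simply casts, which is only safe because of the stated invariant that struct sock stays the first member of struct packet_sock. A hypothetical compile-time guard for that invariant could look like this (a sketch, not part of the patch):

#include <linux/bug.h>
#include <linux/stddef.h>

/* Hypothetical guard: fails the build if 'sk' ever stops being the first
 * member of struct packet_sock, which would silently break pkt_sk(). */
static inline struct packet_sock *pkt_sk_checked(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct packet_sock, sk) != 0);
	return (struct packet_sock *)sk;
}
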
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index af95c8e..a65ee78 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@
 			break;
 	}
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	state_change(sk);
 }
 
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 7298137..7787537 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@
 
 	rdsdebug("listen data ready sk %p\n", sk);
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
 	if (!ready) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@
 		queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 6243258..4fac4f2 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -322,7 +322,7 @@
 
 	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 1b4fd68..81cf5a4 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -174,7 +174,7 @@
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 
 	/*
 	 * write_space is only called when data leaves tcp's send queue if
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 752b723..c275bad 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -150,6 +150,20 @@
 	rfkill_led_trigger_event(rfkill);
 }
 
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+	return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+	BUG_ON(!rfkill);
+
+	rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
 static int rfkill_led_trigger_register(struct rfkill *rfkill)
 {
 	rfkill->led_trigger.name = rfkill->ledtrigname
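
A hedged sketch of how a driver could use the two new exports, assuming the usual rfkill_alloc()/rfkill_register() flow; the device name, trigger name and ops are placeholders:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

/* Placeholder driver snippet: only the two exports added above are new;
 * rfkill_alloc()/rfkill_register()/rfkill_destroy() are the existing API. */
static int example_setup_rfkill(struct device *dev,
				const struct rfkill_ops *ops, void *priv)
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN, ops, priv);
	if (!rfk)
		return -ENOMEM;

	/* Set before rfkill_register(), which picks ledtrigname up when it
	 * registers the LED trigger (see rfkill_led_trigger_register()). */
	rfkill_set_led_trigger_name(rfk, "example-wlan-led");

	err = rfkill_register(rfk);
	if (err) {
		rfkill_destroy(rfk);
		return err;
	}

	dev_info(dev, "rfkill LED trigger: %s\n",
		 rfkill_get_led_trigger_name(rfk));
	return 0;
}
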
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb82..05d60859 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@
 	struct tcf_common *pc;
 	int ret = 0;
 	int err;
+#ifdef CONFIG_GACT_PROB
+	struct tc_gact_p *p_parm = NULL;
+#endif
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -82,6 +85,12 @@
 #ifndef CONFIG_GACT_PROB
 	if (tb[TCA_GACT_PROB] != NULL)
 		return -EOPNOTSUPP;
+#else
+	if (tb[TCA_GACT_PROB]) {
+		p_parm = nla_data(tb[TCA_GACT_PROB]);
+		if (p_parm->ptype >= MAX_RAND)
+			return -EINVAL;
+	}
 #endif
 
 	pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@
 	spin_lock_bh(&gact->tcf_lock);
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
-	if (tb[TCA_GACT_PROB] != NULL) {
-		struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
 		gact->tcfg_pval    = p_parm->pval;
 		gact->tcfg_ptype   = p_parm->ptype;
@@ -133,7 +141,7 @@
 
 	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+	if (gact->tcfg_ptype)
 		action = gact_rand[gact->tcfg_ptype](gact);
 	else
 		action = gact->tcf_action;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281a..58fb3c7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@
 err2:
 	kfree(tname);
 err1:
-	kfree(pc);
+	if (ret == ACT_P_CREATED) {
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
+	}
 	return err;
 }
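
The act_pedit and act_simple hunks below repeat this same error-path cleanup; as a sketch, the shared pattern (assuming the rate estimator was attached by tcf_hash_create() when 'est' was supplied) could be factored into a hypothetical helper like this:

#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/gen_stats.h>

/* Hypothetical helper, not part of the patch: undo a freshly created
 * tcf_common on an error path by killing its rate estimator (if one was
 * requested) and freeing the object after an RCU grace period. */
static void tcf_common_cleanup_created(struct tcf_common *pc,
				       struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&pc->tcfc_bstats, &pc->tcfc_rate_est);
	kfree_rcu(pc, tcfc_rcu);
}
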
 
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fe81cc1..9c0fd0c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -200,13 +200,12 @@
 out:
 	if (err) {
 		m->tcf_qstats.overlimits++;
-		/* should we be asking for packet to be dropped?
-		 * may make sense for redirect case only
-		 */
-		retval = TC_ACT_SHOT;
-	} else {
+		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+			retval = TC_ACT_SHOT;
+		else
+			retval = m->tcf_action;
+	} else
 		retval = m->tcf_action;
-	}
 	spin_unlock(&m->tcf_lock);
 
 	return retval;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6..45c53ab 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@
 		p = to_pedit(pc);
 		keys = kmalloc(ksize, GFP_KERNEL);
 		if (keys == NULL) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return -ENOMEM;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a..3714f60 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@
 		d = to_defact(pc);
 		ret = alloc_defdata(d, defdata);
 		if (ret < 0) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return ret;
 		}
 		d->tcf_action = parm->action;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 511323e..6c4d5fe 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -324,24 +324,6 @@
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
-/**
- * 	netif_notify_peers - notify network peers about existence of @dev
- * 	@dev: network device
- *
- * Generate traffic such that interested network peers are aware of
- * @dev, such as by generating a gratuitous ARP. This may be used when
- * a device wants to inform the rest of the network about some sort of
- * reconfiguration such as a failover event or virtual machine
- * migration.
- */
-void netif_notify_peers(struct net_device *dev)
-{
-	rtnl_lock();
-	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
-	rtnl_unlock();
-}
-EXPORT_SYMBOL(netif_notify_peers);
-
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3..e4723d3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@
 	return index;
 }
 
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	skb = sch->ops->peek(sch);
+	return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+				    u32 lmax, u32 inv_w, int delta_w)
+{
+	int i;
+
+	/* update qfq-specific data */
+	cl->lmax = lmax;
+	cl->inv_w = inv_w;
+	i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+	cl->grp = &q->groups[i];
+
+	q->wsum += delta_w;
+}
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -250,6 +278,8 @@
 		lmax = 1UL << QFQ_MTU_SHIFT;
 
 	if (cl != NULL) {
+		bool need_reactivation = false;
+
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@
 				return err;
 		}
 
-		if (inv_w != cl->inv_w) {
-			sch_tree_lock(sch);
-			q->wsum += delta_w;
-			cl->inv_w = inv_w;
-			sch_tree_unlock(sch);
+		if (lmax == cl->lmax && inv_w == cl->inv_w)
+			return 0; /* nothing to update */
+
+		i = qfq_calc_index(inv_w, lmax);
+		sch_tree_lock(sch);
+		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+			/*
+			 * shift cl->F back, to not charge the
+			 * class for the not-yet-served head
+			 * packet
+			 */
+			cl->F = cl->S;
+			/* remove class from its slot in the old group */
+			qfq_deactivate_class(q, cl);
+			need_reactivation = true;
 		}
+
+		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+		if (need_reactivation) /* activate in new group */
+			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		sch_tree_unlock(sch);
+
 		return 0;
 	}
 
@@ -273,11 +320,8 @@
 
 	cl->refcnt = 1;
 	cl->common.classid = classid;
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
-	cl->grp = &q->groups[i];
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@
 			return err;
 		}
 	}
-	q->wsum += weight;
 
 	sch_tree_lock(sch);
 	qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@
 	}
 }
 
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
-}
-
 /*
  * Updates the class, returns true if also the group needs to be updated.
  */
@@ -843,11 +877,8 @@
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
 	struct qfq_class *cl;
 	int err;
-	u64 roundedS;
-	int s;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -876,11 +907,25 @@
 		return err;
 
 	/* If we reach this point, queue q was idle */
-	grp = cl->grp;
+	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+	return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int pkt_len)
+{
+	struct qfq_group *grp = cl->grp;
+	u64 roundedS;
+	int s;
+
 	qfq_update_start(q, cl);
 
 	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
 	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 
 	/*
@@ -917,8 +962,6 @@
 
 skip_update:
 	qfq_slot_insert(grp, cl, roundedS);
-
-	return err;
 }
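
For orientation, the timestamp update done in qfq_activate_class() can be played through standalone; the round-down used here is an assumed mask-to-slot-boundary definition, written only for illustration:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the update above: the finish time advances by the
 * packet length scaled by the inverse weight, and the start time is rounded
 * down to the group's slot boundary.  The round-down definition is an
 * assumption made for this example. */
static uint64_t qfq_round_down_sketch(uint64_t ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

int main(void)
{
	uint64_t S = 1000, F;		/* virtual start time */
	uint32_t inv_w = 4;		/* inverse of the class weight */
	unsigned int pkt_len = 1500;	/* head packet length in bytes */
	unsigned int slot_shift = 7;	/* slot size of 128 time units */

	F = S + (uint64_t)pkt_len * inv_w;	/* 1000 + 1500 * 4 = 7000 */
	printf("F=%llu roundedS=%llu\n",
	       (unsigned long long)F,
	       (unsigned long long)qfq_round_down_sketch(S, slot_shift));
	return 0;
}
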
 
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ebaef3e..b1ef3bc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -82,6 +82,7 @@
 					  sctp_scope_t scope,
 					  gfp_t gfp)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock *sp;
 	int i;
 	sctp_paramhdr_t *p;
@@ -124,7 +125,7 @@
 	 * socket values.
 	 */
 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
-	asoc->pf_retrans  = sctp_pf_retrans;
+	asoc->pf_retrans  = net->sctp.pf_retrans;
 
 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -175,7 +176,7 @@
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-		min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
 
 	/* Initializes the timers */
 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -281,7 +282,7 @@
 	 * and will revert old behavior.
 	 */
 	asoc->peer.asconf_capable = 0;
-	if (sctp_addip_noauth)
+	if (net->sctp.addip_noauth)
 		asoc->peer.asconf_capable = 1;
 	asoc->asconf_addr_del_pending = NULL;
 	asoc->src_out_of_asoc_ok = 0;
@@ -641,6 +642,7 @@
 					   const gfp_t gfp,
 					   const int peer_state)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_transport *peer;
 	struct sctp_sock *sp;
 	unsigned short port;
@@ -674,7 +676,7 @@
 		return peer;
 	}
 
-	peer = sctp_transport_new(addr, gfp);
+	peer = sctp_transport_new(net, addr, gfp);
 	if (!peer)
 		return NULL;
 
@@ -1089,13 +1091,15 @@
 
 /* Is this the association we are looking for? */
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
+					   struct net *net,
 					   const union sctp_addr *laddr,
 					   const union sctp_addr *paddr)
 {
 	struct sctp_transport *transport;
 
 	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
-	    (htons(asoc->peer.port) == paddr->v4.sin_port)) {
+	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
+	    net_eq(sock_net(asoc->base.sk), net)) {
 		transport = sctp_assoc_lookup_paddr(asoc, paddr);
 		if (!transport)
 			goto out;
@@ -1116,6 +1120,7 @@
 	struct sctp_association *asoc =
 		container_of(work, struct sctp_association,
 			     base.inqueue.immediate);
+	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sctp_inq *inqueue;
@@ -1148,13 +1153,13 @@
 		if (sctp_chunk_is_data(chunk))
 			asoc->peer.last_data_from = chunk->transport;
 		else
-			SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
 
 		if (chunk->transport)
 			chunk->transport->last_time_heard = jiffies;
 
 		/* Run through the state machine. */
-		error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
+		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
 				   state, ep, asoc, chunk, GFP_ATOMIC);
 
 		/* Check to see if the association is freed in response to
@@ -1414,6 +1419,7 @@
 /* Should we send a SACK to update our peer? */
 static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	switch (asoc->state) {
 	case SCTP_STATE_ESTABLISHED:
 	case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1421,7 +1427,7 @@
 	case SCTP_STATE_SHUTDOWN_SENT:
 		if ((asoc->rwnd > asoc->a_rwnd) &&
 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
-			   (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
 			   asoc->pathmtu)))
 			return 1;
 		break;
@@ -1542,7 +1548,8 @@
 	if (asoc->peer.ipv6_address)
 		flags |= SCTP_ADDR6_PEERSUPP;
 
-	return sctp_bind_addr_copy(&asoc->base.bind_addr,
+	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+				   &asoc->base.bind_addr,
 				   &asoc->ep->base.bind_addr,
 				   scope, gfp, flags);
 }
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index bf81204..159b9bc 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -392,13 +392,14 @@
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_auth_bytes	*secret;
 	struct sctp_shared_key *ep_key;
 
 	/* If we don't support AUTH, or peer is not capable
 	 * we don't need to do anything.
 	 */
-	if (!sctp_auth_enable || !asoc->peer.auth_capable)
+	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
 		return 0;
 
 	/* If the key_id is non-zero and we couldn't find an
@@ -445,11 +446,12 @@
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
+	struct net *net = sock_net(ep->base.sk);
 	struct crypto_hash *tfm = NULL;
 	__u16   id;
 
 	/* if the transforms are already allocated, we are done */
-	if (!sctp_auth_enable) {
+	if (!net->sctp.auth_enable) {
 		ep->auth_hmacs = NULL;
 		return 0;
 	}
@@ -674,7 +676,12 @@
 /* Check if peer requested that this chunk is authenticated */
 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-	if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable)
+	struct net  *net;
+	if (!asoc)
+		return 0;
+
+	net = sock_net(asoc->base.sk);
+	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
 		return 0;
 
 	return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
@@ -683,7 +690,12 @@
 /* Check if we requested that peer authenticate this chunk. */
 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-	if (!sctp_auth_enable || !asoc)
+	struct net *net;
+	if (!asoc)
+		return 0;
+
+	net = sock_net(asoc->base.sk);
+	if (!net->sctp.auth_enable)
 		return 0;
 
 	return __sctp_auth_cid(chunk,
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 4ece451..d886b3b 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -52,8 +52,8 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
-			      sctp_scope_t scope, gfp_t gfp,
+static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
+			      union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
 			      int flags);
 static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 
@@ -62,7 +62,7 @@
 /* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
  * in 'src' which have a broader scope than 'scope'.
  */
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
 			const struct sctp_bind_addr *src,
 			sctp_scope_t scope, gfp_t gfp,
 			int flags)
@@ -75,7 +75,7 @@
 
 	/* Extract the addresses which are relevant for this scope.  */
 	list_for_each_entry(addr, &src->address_list, list) {
-		error = sctp_copy_one_addr(dest, &addr->a, scope,
+		error = sctp_copy_one_addr(net, dest, &addr->a, scope,
 					   gfp, flags);
 		if (error < 0)
 			goto out;
@@ -87,7 +87,7 @@
 	 */
 	if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
 		list_for_each_entry(addr, &src->address_list, list) {
-			error = sctp_copy_one_addr(dest, &addr->a,
+			error = sctp_copy_one_addr(net, dest, &addr->a,
 						   SCTP_SCOPE_LINK, gfp,
 						   flags);
 			if (error < 0)
@@ -448,7 +448,7 @@
 }
 
 /* Copy out addresses from the global local address list. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
 			      union sctp_addr *addr,
 			      sctp_scope_t scope, gfp_t gfp,
 			      int flags)
@@ -456,8 +456,8 @@
 	int error = 0;
 
 	if (sctp_is_any(NULL, addr)) {
-		error = sctp_copy_local_addr_list(dest, scope, gfp, flags);
-	} else if (sctp_in_scope(addr, scope)) {
+		error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
+	} else if (sctp_in_scope(net, addr, scope)) {
 		/* Now that the address is in scope, check to see if
 		 * the address type is supported by local sock as
 		 * well as the remote peer.
@@ -494,7 +494,7 @@
 }
 
 /* Is 'addr' valid for 'scope'?  */
-int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope)
 {
 	sctp_scope_t addr_scope = sctp_scope(addr);
 
@@ -512,7 +512,7 @@
 	 * Address scoping can be selectively controlled via sysctl
 	 * option
 	 */
-	switch (sctp_scope_policy) {
+	switch (net->sctp.scope_policy) {
 	case SCTP_SCOPE_POLICY_DISABLE:
 		return 1;
 	case SCTP_SCOPE_POLICY_ENABLE:
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 6c85564..7c2df9c 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -257,7 +257,7 @@
 	offset = 0;
 
 	if ((whole > 1) || (whole && over))
-		SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+		SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
 	/* Create chunks for all the full sized DATA chunks. */
 	for (i=0, len=first_len; i < whole; i++) {
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 68a385d..1859e2b 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -65,6 +65,7 @@
 						struct sock *sk,
 						gfp_t gfp)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_hmac_algo_param *auth_hmacs = NULL;
 	struct sctp_chunks_param *auth_chunks = NULL;
 	struct sctp_shared_key *null_key;
@@ -74,7 +75,7 @@
 	if (!ep->digest)
 		return NULL;
 
-	if (sctp_auth_enable) {
+	if (net->sctp.auth_enable) {
 		/* Allocate space for HMACS and CHUNKS authentication
 		 * variables.  There are arrays that we encode directly
 		 * into parameters to make the rest of the operations easier.
@@ -106,7 +107,7 @@
 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate ASCONF and ASCONF-ACK chunks
 		 */
-		if (sctp_addip_enable) {
+		if (net->sctp.addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
 			auth_chunks->param_hdr.length =
@@ -140,14 +141,14 @@
 	INIT_LIST_HEAD(&ep->asocs);
 
 	/* Use SCTP specific send buffer space queues.  */
-	ep->sndbuf_policy = sctp_sndbuf_policy;
+	ep->sndbuf_policy = net->sctp.sndbuf_policy;
 
 	sk->sk_data_ready = sctp_data_ready;
 	sk->sk_write_space = sctp_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
 	/* Get the receive buffer policy for this endpoint */
-	ep->rcvbuf_policy = sctp_rcvbuf_policy;
+	ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
 
 	/* Initialize the secret key used with cookie. */
 	get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -302,11 +303,13 @@
 
 /* Is this the endpoint we are looking for?  */
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+					       struct net *net,
 					       const union sctp_addr *laddr)
 {
 	struct sctp_endpoint *retval = NULL;
 
-	if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
+	if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
+	    net_eq(sock_net(ep->base.sk), net)) {
 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
 					 sctp_sk(ep->base.sk)))
 			retval = ep;
@@ -343,7 +346,8 @@
 
 	rport = ntohs(paddr->v4.sin_port);
 
-	hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+	hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
+				 rport);
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, node, &head->chain) {
@@ -386,13 +390,14 @@
 {
 	struct sctp_sockaddr_entry *addr;
 	struct sctp_bind_addr *bp;
+	struct net *net = sock_net(ep->base.sk);
 
 	bp = &ep->base.bind_addr;
 	/* This function is called with the socket lock held,
 	 * so the address_list can not change.
 	 */
 	list_for_each_entry(addr, &bp->address_list, list) {
-		if (sctp_has_association(&addr->a, paddr))
+		if (sctp_has_association(net, &addr->a, paddr))
 			return 1;
 	}
 
@@ -409,6 +414,7 @@
 			     base.inqueue.immediate);
 	struct sctp_association *asoc;
 	struct sock *sk;
+	struct net *net;
 	struct sctp_transport *transport;
 	struct sctp_chunk *chunk;
 	struct sctp_inq *inqueue;
@@ -423,6 +429,7 @@
 	asoc = NULL;
 	inqueue = &ep->base.inqueue;
 	sk = ep->base.sk;
+	net = sock_net(sk);
 
 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
@@ -474,12 +481,12 @@
 		if (asoc && sctp_chunk_is_data(chunk))
 			asoc->peer.last_data_from = chunk->transport;
 		else
-			SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+			SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
 
 		if (chunk->transport)
 			chunk->transport->last_time_heard = jiffies;
 
-		error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
+		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
 				   ep, asoc, chunk, GFP_ATOMIC);
 
 		if (error && chunk)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index e64d521..25dfe73 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -66,12 +66,15 @@
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+				      struct sk_buff *skb,
 				      const union sctp_addr *laddr,
 				      const union sctp_addr *paddr,
 				      struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+						const union sctp_addr *laddr);
 static struct sctp_association *__sctp_lookup_association(
+					struct net *net,
 					const union sctp_addr *local,
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
@@ -80,7 +83,7 @@
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
-static inline int sctp_rcv_checksum(struct sk_buff *skb)
+static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 {
 	struct sctphdr *sh = sctp_hdr(skb);
 	__le32 cmp = sh->checksum;
@@ -96,7 +99,7 @@
 
 	if (val != cmp) {
 		/* CRC failure, dump it. */
-		SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+		SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
 		return -1;
 	}
 	return 0;
@@ -129,11 +132,12 @@
 	union sctp_addr dest;
 	int family;
 	struct sctp_af *af;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->pkt_type!=PACKET_HOST)
 		goto discard_it;
 
-	SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
 
 	if (skb_linearize(skb))
 		goto discard_it;
@@ -145,7 +149,7 @@
 	if (skb->len < sizeof(struct sctphdr))
 		goto discard_it;
 	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
-		  sctp_rcv_checksum(skb) < 0)
+		  sctp_rcv_checksum(net, skb) < 0)
 		goto discard_it;
 
 	skb_pull(skb, sizeof(struct sctphdr));
@@ -178,10 +182,10 @@
 	    !af->addr_valid(&dest, NULL, skb))
 		goto discard_it;
 
-	asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
+	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 
 	if (!asoc)
-		ep = __sctp_rcv_lookup_endpoint(&dest);
+		ep = __sctp_rcv_lookup_endpoint(net, &dest);
 
 	/* Retrieve the common input handling substructure. */
 	rcvr = asoc ? &asoc->base : &ep->base;
@@ -200,7 +204,7 @@
 			sctp_endpoint_put(ep);
 			ep = NULL;
 		}
-		sk = sctp_get_ctl_sock();
+		sk = net->sctp.ctl_sock;
 		ep = sctp_sk(sk)->ep;
 		sctp_endpoint_hold(ep);
 		rcvr = &ep->base;
@@ -216,7 +220,7 @@
 	 */
 	if (!asoc) {
 		if (sctp_rcv_ootb(skb)) {
-			SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
 			goto discard_release;
 		}
 	}
@@ -272,9 +276,9 @@
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
 		}
-		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
+		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
 	} else {
-		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
+		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
@@ -289,7 +293,7 @@
 	return 0;
 
 discard_it:
-	SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
+	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
 	kfree_skb(skb);
 	return 0;
 
@@ -462,11 +466,13 @@
 		}
 			
 	} else {
+		struct net *net = sock_net(sk);
+
 		if (timer_pending(&t->proto_unreach_timer) &&
 		    del_timer(&t->proto_unreach_timer))
 			sctp_association_put(asoc);
 
-		sctp_do_sm(SCTP_EVENT_T_OTHER,
+		sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
 			   asoc->state, asoc->ep, asoc, t,
 			   GFP_ATOMIC);
@@ -474,7 +480,7 @@
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
-struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 			     struct sctphdr *sctphdr,
 			     struct sctp_association **app,
 			     struct sctp_transport **tpp)
@@ -503,7 +509,7 @@
 	/* Look for an association that matches the incoming ICMP error
 	 * packet.
 	 */
-	asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
+	asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
 	if (!asoc)
 		return NULL;
 
@@ -539,7 +545,7 @@
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	*app = asoc;
 	*tpp = transport;
@@ -586,9 +592,10 @@
 	struct inet_sock *inet;
 	sk_buff_data_t saveip, savesctp;
 	int err;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->len < ihlen + 8) {
-		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -597,12 +604,12 @@
 	savesctp = skb->transport_header;
 	skb_reset_network_header(skb);
 	skb_set_transport_header(skb, ihlen);
-	sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
 	/* Put back the original values. */
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	/* Warning:  The sock lock is held.  Remember to call
@@ -723,12 +730,13 @@
 /* Insert endpoint into the hash table.  */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
+	struct net *net = sock_net(ep->base.sk);
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
 
 	epb = &ep->base;
 
-	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
@@ -747,12 +755,13 @@
 /* Remove endpoint from the hash table.  */
 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
+	struct net *net = sock_net(ep->base.sk);
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 
 	epb = &ep->base;
 
-	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
@@ -770,7 +779,8 @@
 }
 
 /* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+						const union sctp_addr *laddr)
 {
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
@@ -778,16 +788,16 @@
 	struct hlist_node *node;
 	int hash;
 
-	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
+	hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
-		if (sctp_endpoint_is_match(ep, laddr))
+		if (sctp_endpoint_is_match(ep, net, laddr))
 			goto hit;
 	}
 
-	ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+	ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 hit:
 	sctp_endpoint_hold(ep);
@@ -798,13 +808,15 @@
 /* Insert association into the hash table.  */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
 
 	epb = &asoc->base;
 
 	/* Calculate which chain this entry will belong to. */
-	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
+	epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
+					 asoc->peer.port);
 
 	head = &sctp_assoc_hashtable[epb->hashent];
 
@@ -827,12 +839,13 @@
 /* Remove association from the hash table.  */
 static void __sctp_unhash_established(struct sctp_association *asoc)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 
 	epb = &asoc->base;
 
-	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
+	epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
 					 asoc->peer.port);
 
 	head = &sctp_assoc_hashtable[epb->hashent];
@@ -855,6 +868,7 @@
 
 /* Look up an association. */
 static struct sctp_association *__sctp_lookup_association(
+					struct net *net,
 					const union sctp_addr *local,
 					const union sctp_addr *peer,
 					struct sctp_transport **pt)
@@ -869,12 +883,13 @@
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
+	hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
+				 ntohs(peer->v4.sin_port));
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, node, &head->chain) {
 		asoc = sctp_assoc(epb);
-		transport = sctp_assoc_is_match(asoc, local, peer);
+		transport = sctp_assoc_is_match(asoc, net, local, peer);
 		if (transport)
 			goto hit;
 	}
@@ -892,27 +907,29 @@
 
 /* Look up an association. BH-safe. */
 SCTP_STATIC
-struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
+struct sctp_association *sctp_lookup_association(struct net *net,
+						 const union sctp_addr *laddr,
 						 const union sctp_addr *paddr,
 					    struct sctp_transport **transportp)
 {
 	struct sctp_association *asoc;
 
 	sctp_local_bh_disable();
-	asoc = __sctp_lookup_association(laddr, paddr, transportp);
+	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 	sctp_local_bh_enable();
 
 	return asoc;
 }
 
 /* Is there an association matching the given local and peer addresses? */
-int sctp_has_association(const union sctp_addr *laddr,
+int sctp_has_association(struct net *net,
+			 const union sctp_addr *laddr,
 			 const union sctp_addr *paddr)
 {
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
 
-	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
+	if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
 		sctp_association_put(asoc);
 		return 1;
 	}
@@ -938,7 +955,8 @@
  * in certain circumstances.
  *
  */
-static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+	struct sk_buff *skb,
 	const union sctp_addr *laddr, struct sctp_transport **transportp)
 {
 	struct sctp_association *asoc;
@@ -978,7 +996,7 @@
 
 		af->from_addr_param(paddr, params.addr, sh->source, 0);
 
-		asoc = __sctp_lookup_association(laddr, paddr, &transport);
+		asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
 		if (asoc)
 			return asoc;
 	}
@@ -1001,6 +1019,7 @@
  * subsequent ASCONF Chunks. If found, proceed to rule D4.
  */
 static struct sctp_association *__sctp_rcv_asconf_lookup(
+					struct net *net,
 					sctp_chunkhdr_t *ch,
 					const union sctp_addr *laddr,
 					__be16 peer_port,
@@ -1020,7 +1039,7 @@
 
 	af->from_addr_param(&paddr, param, peer_port, 0);
 
-	return __sctp_lookup_association(laddr, &paddr, transportp);
+	return __sctp_lookup_association(net, laddr, &paddr, transportp);
 }
 
 
@@ -1033,7 +1052,8 @@
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
-static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+				      struct sk_buff *skb,
 				      const union sctp_addr *laddr,
 				      struct sctp_transport **transportp)
 {
@@ -1074,8 +1094,9 @@
 			    break;
 
 		    case SCTP_CID_ASCONF:
-			    if (have_auth || sctp_addip_noauth)
-				    asoc = __sctp_rcv_asconf_lookup(ch, laddr,
+			    if (have_auth || net->sctp.addip_noauth)
+				    asoc = __sctp_rcv_asconf_lookup(
+							net, ch, laddr,
 							sctp_hdr(skb)->source,
 							transportp);
 		    default:
@@ -1098,7 +1119,8 @@
  * include looking inside of INIT/INIT-ACK chunks or after the AUTH
  * chunks.
  */
-static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+				      struct sk_buff *skb,
 				      const union sctp_addr *laddr,
 				      struct sctp_transport **transportp)
 {
@@ -1118,11 +1140,11 @@
 	switch (ch->type) {
 	case SCTP_CID_INIT:
 	case SCTP_CID_INIT_ACK:
-		return __sctp_rcv_init_lookup(skb, laddr, transportp);
+		return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
 		break;
 
 	default:
-		return __sctp_rcv_walk_lookup(skb, laddr, transportp);
+		return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
 		break;
 	}
 
@@ -1131,21 +1153,22 @@
 }
 
 /* Lookup an association for an inbound skb. */
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+				      struct sk_buff *skb,
 				      const union sctp_addr *paddr,
 				      const union sctp_addr *laddr,
 				      struct sctp_transport **transportp)
 {
 	struct sctp_association *asoc;
 
-	asoc = __sctp_lookup_association(laddr, paddr, transportp);
+	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 
 	/* Further lookup for INIT/INIT-ACK packets.
 	 * SCTP Implementors Guide, 2.18 Handling of address
 	 * parameters within the INIT or INIT-ACK.
 	 */
 	if (!asoc)
-		asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);
+		asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
 
 	return asoc;
 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ed7139e..ea14cb4 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -99,6 +99,7 @@
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr = NULL;
 	struct sctp_sockaddr_entry *temp;
+	struct net *net = dev_net(ifa->idev->dev);
 	int found = 0;
 
 	switch (ev) {
@@ -110,27 +111,27 @@
 			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
-			spin_lock_bh(&sctp_local_addr_lock);
-			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-			spin_unlock_bh(&sctp_local_addr_lock);
+			spin_lock_bh(&net->sctp.local_addr_lock);
+			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+			spin_unlock_bh(&net->sctp.local_addr_lock);
 		}
 		break;
 	case NETDEV_DOWN:
-		spin_lock_bh(&sctp_local_addr_lock);
+		spin_lock_bh(&net->sctp.local_addr_lock);
 		list_for_each_entry_safe(addr, temp,
-					&sctp_local_addr_list, list) {
+					&net->sctp.local_addr_list, list) {
 			if (addr->a.sa.sa_family == AF_INET6 &&
 					ipv6_addr_equal(&addr->a.v6.sin6_addr,
 						&ifa->addr)) {
-				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
 				break;
 			}
 		}
-		spin_unlock_bh(&sctp_local_addr_lock);
+		spin_unlock_bh(&net->sctp.local_addr_lock);
 		if (found)
 			kfree_rcu(addr, rcu);
 		break;
@@ -154,6 +155,7 @@
 	struct ipv6_pinfo *np;
 	sk_buff_data_t saveip, savesctp;
 	int err;
+	struct net *net = dev_net(skb->dev);
 
 	idev = in6_dev_get(skb->dev);
 
@@ -162,12 +164,12 @@
 	savesctp = skb->transport_header;
 	skb_reset_network_header(skb);
 	skb_set_transport_header(skb, offset);
-	sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+	sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
 	/* Put back the original pointers. */
 	skb->network_header   = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP6_INC_STATS_BH(dev_net(skb->dev), idev, ICMP6_MIB_INERRORS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
 		goto out;
 	}
 
@@ -241,7 +243,7 @@
 			  __func__, skb, skb->len,
 			  &fl6.saddr, &fl6.daddr);
 
-	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
 	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
 		skb->local_df = 1;
@@ -580,7 +582,7 @@
 	if (!(type & IPV6_ADDR_UNICAST))
 		return 0;
 
-	return ipv6_chk_addr(&init_net, in6, NULL, 0);
+	return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -857,14 +859,14 @@
 		struct net_device *dev;
 
 		if (type & IPV6_ADDR_LINKLOCAL) {
+			struct net *net;
 			if (!addr->v6.sin6_scope_id)
 				return 0;
+			net = sock_net(&opt->inet.sk);
 			rcu_read_lock();
-			dev = dev_get_by_index_rcu(&init_net,
-						   addr->v6.sin6_scope_id);
+			dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
 			if (!dev ||
-			    !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
-					   dev, 0)) {
+			    !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
 				rcu_read_unlock();
 				return 0;
 			}
@@ -897,7 +899,7 @@
 			if (!addr->v6.sin6_scope_id)
 				return 0;
 			rcu_read_lock();
-			dev = dev_get_by_index_rcu(&init_net,
+			dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
 						   addr->v6.sin6_scope_id);
 			rcu_read_unlock();
 			if (!dev)
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 8ef8e7d..fe012c4 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -129,20 +129,20 @@
 };
 
 /* Initialize the objcount in the proc filesystem.  */
-void sctp_dbg_objcnt_init(void)
+void sctp_dbg_objcnt_init(struct net *net)
 {
 	struct proc_dir_entry *ent;
 
 	ent = proc_create("sctp_dbg_objcnt", 0,
-			  proc_net_sctp, &sctp_objcnt_ops);
+			  net->sctp.proc_net_sctp, &sctp_objcnt_ops);
 	if (!ent)
 		pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
 }
 
 /* Cleanup the objcount entry in the proc filesystem.  */
-void sctp_dbg_objcnt_exit(void)
+void sctp_dbg_objcnt_exit(struct net *net)
 {
-	remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp);
+	remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp);
 }
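
Routines like sctp_dbg_objcnt_init() above, and sctp_proc_init() further down, now take a struct net; the usual way such per-net init/exit pairs get driven is via pernet_operations. A generic sketch of that hookup (not the registration added by this series) looks like:

#include <linux/module.h>
#include <net/net_namespace.h>

/* Generic pernet hookup sketch; the example_* names are placeholders for
 * whatever per-net setup and teardown a subsystem needs. */
static int __net_init example_net_init(struct net *net)
{
	/* e.g. create this namespace's /proc/net entries and counters */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down everything example_net_init() created for this netns */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
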
 
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 838e18b..0c6359b 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -597,7 +597,7 @@
 	return err;
 no_route:
 	kfree_skb(nskb);
-	IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
 	/* FIXME: Returning the 'err' will affect all the associations
 	 * associated with a socket, although only one of the paths of the
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e7aa177c..072bf6a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -299,6 +299,7 @@
 /* Put a new chunk in an sctp_outq.  */
 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 {
+	struct net *net = sock_net(q->asoc->base.sk);
 	int error = 0;
 
 	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
@@ -337,15 +338,15 @@
 
 			sctp_outq_tail_data(q, chunk);
 			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-				SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
 			else
-				SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
 			q->empty = 0;
 			break;
 		}
 	} else {
 		list_add_tail(&chunk->list, &q->control_chunk_list);
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 	}
 
 	if (error < 0)
@@ -478,11 +479,12 @@
 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		     sctp_retransmit_reason_t reason)
 {
+	struct net *net = sock_net(q->asoc->base.sk);
 	int error = 0;
 
 	switch(reason) {
 	case SCTP_RTXR_T3_RTX:
-		SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
+		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
 		/* Update the retran path if the T3-rtx timer has expired for
 		 * the current retran path.
@@ -493,15 +495,15 @@
 			transport->asoc->unack_data;
 		break;
 	case SCTP_RTXR_FAST_RTX:
-		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
+		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
 		q->fast_rtx = 1;
 		break;
 	case SCTP_RTXR_PMTUD:
-		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
+		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
 		break;
 	case SCTP_RTXR_T1_RTX:
-		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
 		transport->asoc->init_retries++;
 		break;
 	default:
@@ -1914,6 +1916,6 @@
 
 	if (ftsn_chunk) {
 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
 	}
 }
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 534c7ea..794bb14 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -57,7 +57,7 @@
 
 #define DECLARE_PRIMITIVE(name) \
 /* This is called in the code as sctp_primitive_ ## name.  */ \
-int sctp_primitive_ ## name(struct sctp_association *asoc, \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
 			    void *arg) { \
 	int error = 0; \
 	sctp_event_t event_type; sctp_subtype_t subtype; \
@@ -69,7 +69,7 @@
 	state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
 	ep = asoc ? asoc->ep : NULL; \
 	\
-	error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
+	error = sctp_do_sm(net, event_type, subtype, state, ep, asoc,	\
 			   arg, GFP_KERNEL); \
 	return error; \
 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index dc12feb..c3bea26 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -80,11 +80,12 @@
 /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
 static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 {
+	struct net *net = seq->private;
 	int i;
 
 	for (i = 0; sctp_snmp_list[i].name != NULL; i++)
 		seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-			   snmp_fold_field((void __percpu **)sctp_statistics,
+			   snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
 				      sctp_snmp_list[i].entry));
 
 	return 0;
@@ -93,7 +94,7 @@
 /* Initialize the seq file operations for 'snmp' object. */
 static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, sctp_snmp_seq_show, NULL);
+	return single_open_net(inode, file, sctp_snmp_seq_show);
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
@@ -105,11 +106,12 @@
 };
 
 /* Set up the proc fs entry for 'snmp' object. */
-int __init sctp_snmp_proc_init(void)
+int __net_init sctp_snmp_proc_init(struct net *net)
 {
 	struct proc_dir_entry *p;
 
-	p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
+	p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
+			&sctp_snmp_seq_fops);
 	if (!p)
 		return -ENOMEM;
 
@@ -117,9 +119,9 @@
 }
 
 /* Cleanup the proc fs entry for 'snmp' object. */
-void sctp_snmp_proc_exit(void)
+void sctp_snmp_proc_exit(struct net *net)
 {
-	remove_proc_entry("snmp", proc_net_sctp);
+	remove_proc_entry("snmp", net->sctp.proc_net_sctp);
 }
 
 /* Dump local addresses of an association/endpoint. */
@@ -213,6 +215,8 @@
 	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
 		sk = epb->sk;
+		if (!net_eq(sock_net(sk), seq_file_net(seq)))
+			continue;
 		seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
 			   sctp_sk(sk)->type, sk->sk_state, hash,
 			   epb->bind_addr.port,
@@ -239,7 +243,8 @@
 /* Initialize the seq file operations for 'eps' object. */
 static int sctp_eps_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &sctp_eps_ops);
+	return seq_open_net(inode, file, &sctp_eps_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_eps_seq_fops = {
@@ -250,11 +255,12 @@
 };
 
 /* Set up the proc fs entry for 'eps' object. */
-int __init sctp_eps_proc_init(void)
+int __net_init sctp_eps_proc_init(struct net *net)
 {
 	struct proc_dir_entry *p;
 
-	p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
+	p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
+			&sctp_eps_seq_fops);
 	if (!p)
 		return -ENOMEM;
 
@@ -262,9 +268,9 @@
 }
 
 /* Cleanup the proc fs entry for 'eps' object. */
-void sctp_eps_proc_exit(void)
+void sctp_eps_proc_exit(struct net *net)
 {
-	remove_proc_entry("eps", proc_net_sctp);
+	remove_proc_entry("eps", net->sctp.proc_net_sctp);
 }
 
 
@@ -317,6 +323,8 @@
 	sctp_for_each_hentry(epb, node, &head->chain) {
 		assoc = sctp_assoc(epb);
 		sk = epb->sk;
+		if (!net_eq(sock_net(sk), seq_file_net(seq)))
+			continue;
 		seq_printf(seq,
 			   "%8pK %8pK %-3d %-3d %-2d %-4d "
 			   "%4d %8d %8d %7d %5lu %-5d %5d ",
@@ -356,7 +364,8 @@
 /* Initialize the seq file operations for 'assocs' object. */
 static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &sctp_assoc_ops);
+	return seq_open_net(inode, file, &sctp_assoc_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_assocs_seq_fops = {
@@ -367,11 +376,11 @@
 };
 
 /* Set up the proc fs entry for 'assocs' object. */
-int __init sctp_assocs_proc_init(void)
+int __net_init sctp_assocs_proc_init(struct net *net)
 {
 	struct proc_dir_entry *p;
 
-	p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+	p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
 			&sctp_assocs_seq_fops);
 	if (!p)
 		return -ENOMEM;
@@ -380,9 +389,9 @@
 }
 
 /* Cleanup the proc fs entry for 'assocs' object. */
-void sctp_assocs_proc_exit(void)
+void sctp_assocs_proc_exit(struct net *net)
 {
-	remove_proc_entry("assocs", proc_net_sctp);
+	remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
 static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
@@ -428,6 +437,8 @@
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, node, &head->chain) {
+		if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
+			continue;
 		assoc = sctp_assoc(epb);
 		list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
 					transports) {
@@ -491,14 +502,15 @@
 };
 
 /* Cleanup the proc fs entry for 'remaddr' object. */
-void sctp_remaddr_proc_exit(void)
+void sctp_remaddr_proc_exit(struct net *net)
 {
-	remove_proc_entry("remaddr", proc_net_sctp);
+	remove_proc_entry("remaddr", net->sctp.proc_net_sctp);
 }
 
 static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &sctp_remaddr_ops);
+	return seq_open_net(inode, file, &sctp_remaddr_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_remaddr_seq_fops = {
@@ -508,11 +520,12 @@
 	.release = seq_release,
 };
 
-int __init sctp_remaddr_proc_init(void)
+int __net_init sctp_remaddr_proc_init(struct net *net)
 {
 	struct proc_dir_entry *p;
 
-	p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
+	p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
+			&sctp_remaddr_seq_fops);
 	if (!p)
 		return -ENOMEM;
 	return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1f89c4e..2d51842 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -69,21 +69,10 @@
 
 /* Global data structures. */
 struct sctp_globals sctp_globals __read_mostly;
-DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
-
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry	*proc_net_sctp;
-#endif
 
 struct idr sctp_assocs_id;
 DEFINE_SPINLOCK(sctp_assocs_id_lock);
 
-/* This is the global socket data structure used for responding to
- * the Out-of-the-blue (OOTB) packets.  A control sock will be created
- * for this socket at the initialization time.
- */
-static struct sock *sctp_ctl_sock;
-
 static struct sctp_pf *sctp_pf_inet6_specific;
 static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
@@ -96,74 +85,54 @@
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
-/* Return the address of the control sock. */
-struct sock *sctp_get_ctl_sock(void)
-{
-	return sctp_ctl_sock;
-}
-
 /* Set up the proc fs entry for the SCTP protocol. */
-static __init int sctp_proc_init(void)
+static __net_init int sctp_proc_init(struct net *net)
 {
-	if (percpu_counter_init(&sctp_sockets_allocated, 0))
-		goto out_nomem;
 #ifdef CONFIG_PROC_FS
-	if (!proc_net_sctp) {
-		proc_net_sctp = proc_mkdir("sctp", init_net.proc_net);
-		if (!proc_net_sctp)
-			goto out_free_percpu;
-	}
-
-	if (sctp_snmp_proc_init())
+	net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+	if (!net->sctp.proc_net_sctp)
+		goto out_proc_net_sctp;
+	if (sctp_snmp_proc_init(net))
 		goto out_snmp_proc_init;
-	if (sctp_eps_proc_init())
+	if (sctp_eps_proc_init(net))
 		goto out_eps_proc_init;
-	if (sctp_assocs_proc_init())
+	if (sctp_assocs_proc_init(net))
 		goto out_assocs_proc_init;
-	if (sctp_remaddr_proc_init())
+	if (sctp_remaddr_proc_init(net))
 		goto out_remaddr_proc_init;
 
 	return 0;
 
 out_remaddr_proc_init:
-	sctp_assocs_proc_exit();
+	sctp_assocs_proc_exit(net);
 out_assocs_proc_init:
-	sctp_eps_proc_exit();
+	sctp_eps_proc_exit(net);
 out_eps_proc_init:
-	sctp_snmp_proc_exit();
+	sctp_snmp_proc_exit(net);
 out_snmp_proc_init:
-	if (proc_net_sctp) {
-		proc_net_sctp = NULL;
-		remove_proc_entry("sctp", init_net.proc_net);
-	}
-out_free_percpu:
-	percpu_counter_destroy(&sctp_sockets_allocated);
-#else
-	return 0;
-#endif /* CONFIG_PROC_FS */
-
-out_nomem:
+	remove_proc_entry("sctp", net->proc_net);
+	net->sctp.proc_net_sctp = NULL;
+out_proc_net_sctp:
 	return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+	return 0;
 }
 
 /* Clean up the proc fs entry for the SCTP protocol.
  * Note: Do not make this __exit as it is used in the init error
  * path.
  */
-static void sctp_proc_exit(void)
+static void sctp_proc_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-	sctp_snmp_proc_exit();
-	sctp_eps_proc_exit();
-	sctp_assocs_proc_exit();
-	sctp_remaddr_proc_exit();
+	sctp_snmp_proc_exit(net);
+	sctp_eps_proc_exit(net);
+	sctp_assocs_proc_exit(net);
+	sctp_remaddr_proc_exit(net);
 
-	if (proc_net_sctp) {
-		proc_net_sctp = NULL;
-		remove_proc_entry("sctp", init_net.proc_net);
-	}
+	remove_proc_entry("sctp", net->proc_net);
+	net->sctp.proc_net_sctp = NULL;
 #endif
-	percpu_counter_destroy(&sctp_sockets_allocated);
 }
 
 /* Private helper to extract ipv4 address and stash them in
@@ -201,29 +170,29 @@
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void sctp_get_local_addr_list(void)
+static void sctp_get_local_addr_list(struct net *net)
 {
 	struct net_device *dev;
 	struct list_head *pos;
 	struct sctp_af *af;
 
 	rcu_read_lock();
-	for_each_netdev_rcu(&init_net, dev) {
+	for_each_netdev_rcu(net, dev) {
 		__list_for_each(pos, &sctp_address_families) {
 			af = list_entry(pos, struct sctp_af, list);
-			af->copy_addrlist(&sctp_local_addr_list, dev);
+			af->copy_addrlist(&net->sctp.local_addr_list, dev);
 		}
 	}
 	rcu_read_unlock();
 }
 
 /* Free the existing local addresses.  */
-static void sctp_free_local_addr_list(void)
+static void sctp_free_local_addr_list(struct net *net)
 {
 	struct sctp_sockaddr_entry *addr;
 	struct list_head *pos, *temp;
 
-	list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+	list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
 		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
 		list_del(pos);
 		kfree(addr);
@@ -231,17 +200,17 @@
 }
 
 /* Copy the local addresses which are valid for 'scope' into 'bp'.  */
-int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
-			      gfp_t gfp, int copy_flags)
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+			      sctp_scope_t scope, gfp_t gfp, int copy_flags)
 {
 	struct sctp_sockaddr_entry *addr;
 	int error = 0;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
 		if (!addr->valid)
 			continue;
-		if (sctp_in_scope(&addr->a, scope)) {
+		if (sctp_in_scope(net, &addr->a, scope)) {
 			/* Now that the address is in scope, check to see if
 			 * the address type is really supported by the local
 			 * sock as well as the remote peer.
@@ -397,7 +366,8 @@
 /* Should this be available for binding?   */
 static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 {
-	int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
+	struct net *net = sock_net(&sp->inet.sk);
+	int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
 
 
 	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
@@ -484,7 +454,7 @@
 	SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
 			  __func__, &fl4->daddr, &fl4->saddr);
 
-	rt = ip_route_output_key(&init_net, fl4);
+	rt = ip_route_output_key(sock_net(sk), fl4);
 	if (!IS_ERR(rt))
 		dst = &rt->dst;
 
@@ -530,7 +500,7 @@
 		    (AF_INET == laddr->a.sa.sa_family)) {
 			fl4->saddr = laddr->a.v4.sin_addr.s_addr;
 			fl4->fl4_sport = laddr->a.v4.sin_port;
-			rt = ip_route_output_key(&init_net, fl4);
+			rt = ip_route_output_key(sock_net(sk), fl4);
 			if (!IS_ERR(rt)) {
 				dst = &rt->dst;
 				goto out_unlock;
@@ -627,14 +597,15 @@
 
 void sctp_addr_wq_timeout_handler(unsigned long arg)
 {
+	struct net *net = (struct net *)arg;
 	struct sctp_sockaddr_entry *addrw, *temp;
 	struct sctp_sock *sp;
 
-	spin_lock_bh(&sctp_addr_wq_lock);
+	spin_lock_bh(&net->sctp.addr_wq_lock);
 
-	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
 		SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
-		    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+		    " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state,
 		    addrw);
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -648,7 +619,7 @@
 				goto free_next;
 
 			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
-			if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+			if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
 			    addrw->state == SCTP_ADDR_NEW) {
 				unsigned long timeo_val;
 
@@ -656,12 +627,12 @@
 				    SCTP_ADDRESS_TICK_DELAY);
 				timeo_val = jiffies;
 				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-				mod_timer(&sctp_addr_wq_timer, timeo_val);
+				mod_timer(&net->sctp.addr_wq_timer, timeo_val);
 				break;
 			}
 		}
 #endif
-		list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+		list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
 			struct sock *sk;
 
 			sk = sctp_opt2sk(sp);
@@ -679,31 +650,32 @@
 		list_del(&addrw->list);
 		kfree(addrw);
 	}
-	spin_unlock_bh(&sctp_addr_wq_lock);
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
-static void sctp_free_addr_wq(void)
+static void sctp_free_addr_wq(struct net *net)
 {
 	struct sctp_sockaddr_entry *addrw;
 	struct sctp_sockaddr_entry *temp;
 
-	spin_lock_bh(&sctp_addr_wq_lock);
-	del_timer(&sctp_addr_wq_timer);
-	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+	spin_lock_bh(&net->sctp.addr_wq_lock);
+	del_timer(&net->sctp.addr_wq_timer);
+	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
 		list_del(&addrw->list);
 		kfree(addrw);
 	}
-	spin_unlock_bh(&sctp_addr_wq_lock);
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* lookup the entry for the same address in the addr_waitq
  * sctp_addr_wq MUST be locked
  */
-static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+					struct sctp_sockaddr_entry *addr)
 {
 	struct sctp_sockaddr_entry *addrw;
 
-	list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+	list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
 		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
 			continue;
 		if (addrw->a.sa.sa_family == AF_INET) {
@@ -719,7 +691,7 @@
 	return NULL;
 }
 
-void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
 {
 	struct sctp_sockaddr_entry *addrw;
 	unsigned long timeo_val;
@@ -730,38 +702,38 @@
 	 * new address after a couple of addition and deletion of that address
 	 */
 
-	spin_lock_bh(&sctp_addr_wq_lock);
+	spin_lock_bh(&net->sctp.addr_wq_lock);
 	/* Offsets existing events in addr_wq */
-	addrw = sctp_addr_wq_lookup(addr);
+	addrw = sctp_addr_wq_lookup(net, addr);
 	if (addrw) {
 		if (addrw->state != cmd) {
 			SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
 			    " in wq %p\n", addrw->state, &addrw->a,
-			    &sctp_addr_waitq);
+			    &net->sctp.addr_waitq);
 			list_del(&addrw->list);
 			kfree(addrw);
 		}
-		spin_unlock_bh(&sctp_addr_wq_lock);
+		spin_unlock_bh(&net->sctp.addr_wq_lock);
 		return;
 	}
 
 	/* OK, we have to add the new address to the wait queue */
 	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
 	if (addrw == NULL) {
-		spin_unlock_bh(&sctp_addr_wq_lock);
+		spin_unlock_bh(&net->sctp.addr_wq_lock);
 		return;
 	}
 	addrw->state = cmd;
-	list_add_tail(&addrw->list, &sctp_addr_waitq);
+	list_add_tail(&addrw->list, &net->sctp.addr_waitq);
 	SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
-	    " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+	    " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq);
 
-	if (!timer_pending(&sctp_addr_wq_timer)) {
+	if (!timer_pending(&net->sctp.addr_wq_timer)) {
 		timeo_val = jiffies;
 		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-		mod_timer(&sctp_addr_wq_timer, timeo_val);
+		mod_timer(&net->sctp.addr_wq_timer, timeo_val);
 	}
-	spin_unlock_bh(&sctp_addr_wq_lock);
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* Event handler for inet address addition/deletion events.
@@ -776,11 +748,9 @@
 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr = NULL;
 	struct sctp_sockaddr_entry *temp;
+	struct net *net = dev_net(ifa->ifa_dev->dev);
 	int found = 0;
 
-	if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net))
-		return NOTIFY_DONE;
-
 	switch (ev) {
 	case NETDEV_UP:
 		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -789,27 +759,27 @@
 			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
-			spin_lock_bh(&sctp_local_addr_lock);
-			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-			spin_unlock_bh(&sctp_local_addr_lock);
+			spin_lock_bh(&net->sctp.local_addr_lock);
+			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+			spin_unlock_bh(&net->sctp.local_addr_lock);
 		}
 		break;
 	case NETDEV_DOWN:
-		spin_lock_bh(&sctp_local_addr_lock);
+		spin_lock_bh(&net->sctp.local_addr_lock);
 		list_for_each_entry_safe(addr, temp,
-					&sctp_local_addr_list, list) {
+					&net->sctp.local_addr_list, list) {
 			if (addr->a.sa.sa_family == AF_INET &&
 					addr->a.v4.sin_addr.s_addr ==
 					ifa->ifa_local) {
-				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
 				break;
 			}
 		}
-		spin_unlock_bh(&sctp_local_addr_lock);
+		spin_unlock_bh(&net->sctp.local_addr_lock);
 		if (found)
 			kfree_rcu(addr, rcu);
 		break;
@@ -822,7 +792,7 @@
  * Initialize the control inode/socket with a control endpoint data
  * structure.  This endpoint is reserved exclusively for the OOTB processing.
  */
-static int sctp_ctl_sock_init(void)
+static int sctp_ctl_sock_init(struct net *net)
 {
 	int err;
 	sa_family_t family = PF_INET;
@@ -830,14 +800,14 @@
 	if (sctp_get_pf_specific(PF_INET6))
 		family = PF_INET6;
 
-	err = inet_ctl_sock_create(&sctp_ctl_sock, family,
-				   SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+	err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+				   SOCK_SEQPACKET, IPPROTO_SCTP, net);
 
 	/* If IPv6 socket could not be created, try the IPv4 socket */
 	if (err < 0 && family == PF_INET6)
-		err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+		err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
 					   SOCK_SEQPACKET, IPPROTO_SCTP,
-					   &init_net);
+					   net);
 
 	if (err < 0) {
 		pr_err("Failed to create the SCTP control socket\n");
@@ -990,7 +960,7 @@
 	inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
 			 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 
-	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+	SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
 	return ip_queue_xmit(skb, &transport->fl);
 }
 
@@ -1063,6 +1033,7 @@
 	.handler     = sctp_rcv,
 	.err_handler = sctp_v4_err,
 	.no_policy   = 1,
+	.netns_ok    = 1,
 };
 
 /* IPv4 address related functions.  */
@@ -1130,16 +1101,16 @@
 	return 1;
 }
 
-static inline int init_sctp_mibs(void)
+static inline int init_sctp_mibs(struct net *net)
 {
-	return snmp_mib_init((void __percpu **)sctp_statistics,
+	return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
 			     sizeof(struct sctp_mib),
 			     __alignof__(struct sctp_mib));
 }
 
-static inline void cleanup_sctp_mibs(void)
+static inline void cleanup_sctp_mibs(struct net *net)
 {
-	snmp_mib_free((void __percpu **)sctp_statistics);
+	snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
@@ -1194,6 +1165,143 @@
 	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
+static int sctp_net_init(struct net *net)
+{
+	int status;
+
+	/*
+	 * 14. Suggested SCTP Protocol Parameter Values
+	 */
+	/* The following protocol parameters are RECOMMENDED:  */
+	/* RTO.Initial              - 3  seconds */
+	net->sctp.rto_initial			= SCTP_RTO_INITIAL;
+	/* RTO.Min                  - 1  second */
+	net->sctp.rto_min	 		= SCTP_RTO_MIN;
+	/* RTO.Max                 -  60 seconds */
+	net->sctp.rto_max 			= SCTP_RTO_MAX;
+	/* RTO.Alpha                - 1/8 */
+	net->sctp.rto_alpha			= SCTP_RTO_ALPHA;
+	/* RTO.Beta                 - 1/4 */
+	net->sctp.rto_beta			= SCTP_RTO_BETA;
+
+	/* Valid.Cookie.Life        - 60  seconds */
+	net->sctp.valid_cookie_life		= SCTP_DEFAULT_COOKIE_LIFE;
+
+	/* Whether Cookie Preservative is enabled(1) or not(0) */
+	net->sctp.cookie_preserve_enable 	= 1;
+
+	/* Max.Burst		    - 4 */
+	net->sctp.max_burst			= SCTP_DEFAULT_MAX_BURST;
+
+	/* Association.Max.Retrans  - 10 attempts
+	 * Path.Max.Retrans         - 5  attempts (per destination address)
+	 * Max.Init.Retransmits     - 8  attempts
+	 */
+	net->sctp.max_retrans_association	= 10;
+	net->sctp.max_retrans_path		= 5;
+	net->sctp.max_retrans_init		= 8;
+
+	/* Sendbuffer growth	    - do per-socket accounting */
+	net->sctp.sndbuf_policy			= 0;
+
+	/* Rcvbuffer growth	    - do per-socket accounting */
+	net->sctp.rcvbuf_policy			= 0;
+
+	/* HB.interval              - 30 seconds */
+	net->sctp.hb_interval			= SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+	/* delayed SACK timeout */
+	net->sctp.sack_timeout			= SCTP_DEFAULT_TIMEOUT_SACK;
+
+	/* Disable ADDIP by default. */
+	net->sctp.addip_enable = 0;
+	net->sctp.addip_noauth = 0;
+	net->sctp.default_auto_asconf = 0;
+
+	/* Enable PR-SCTP by default. */
+	net->sctp.prsctp_enable = 1;
+
+	/* Disable AUTH by default. */
+	net->sctp.auth_enable = 0;
+
+	/* Set SCOPE policy to enabled */
+	net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
+	/* Set the default rwnd update threshold */
+	net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
+	/* Initialize maximum autoclose timeout. */
+	net->sctp.max_autoclose		= INT_MAX / HZ;
+
+	status = sctp_sysctl_net_register(net);
+	if (status)
+		goto err_sysctl_register;
+
+	/* Allocate and initialise sctp mibs.  */
+	status = init_sctp_mibs(net);
+	if (status)
+		goto err_init_mibs;
+
+	/* Initialize proc fs directory.  */
+	status = sctp_proc_init(net);
+	if (status)
+		goto err_init_proc;
+
+	sctp_dbg_objcnt_init(net);
+
+	/* Initialize the control inode/socket for handling OOTB packets.  */
+	if ((status = sctp_ctl_sock_init(net))) {
+		pr_err("Failed to initialize the SCTP control sock\n");
+		goto err_ctl_sock_init;
+	}
+
+	/* Initialize the local address list. */
+	INIT_LIST_HEAD(&net->sctp.local_addr_list);
+	spin_lock_init(&net->sctp.local_addr_lock);
+	sctp_get_local_addr_list(net);
+
+	/* Initialize the address event list */
+	INIT_LIST_HEAD(&net->sctp.addr_waitq);
+	INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
+	spin_lock_init(&net->sctp.addr_wq_lock);
+	net->sctp.addr_wq_timer.expires = 0;
+	setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler,
+		    (unsigned long)net);
+
+	return 0;
+
+err_ctl_sock_init:
+	sctp_dbg_objcnt_exit(net);
+	sctp_proc_exit(net);
+err_init_proc:
+	cleanup_sctp_mibs(net);
+err_init_mibs:
+	sctp_sysctl_net_unregister(net);
+err_sysctl_register:
+	return status;
+}
+
+static void sctp_net_exit(struct net *net)
+{
+	/* Free the local address list */
+	sctp_free_addr_wq(net);
+	sctp_free_local_addr_list(net);
+
+	/* Free the control endpoint.  */
+	inet_ctl_sock_destroy(net->sctp.ctl_sock);
+
+	sctp_dbg_objcnt_exit(net);
+
+	sctp_proc_exit(net);
+	cleanup_sctp_mibs(net);
+	sctp_sysctl_net_unregister(net);
+}
+
+static struct pernet_operations sctp_net_ops = {
+	.init = sctp_net_init,
+	.exit = sctp_net_exit,
+};
+
 /* Initialize the universe into something sensible.  */
 SCTP_STATIC __init int sctp_init(void)
 {
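
The hunks above capture the heart of the conversion: the SCTP defaults, proc
entries, MIBs, address lists and control socket all move from file-scope
globals into net->sctp and are set up and torn down through pernet_operations,
so every network namespace gets its own copy. A minimal sketch of the same
pattern for a hypothetical subsystem (the struct netns_foo member of struct net
and the foo_* names are illustrative, not part of this patch):

	static int __net_init foo_net_init(struct net *net)
	{
		/* initialize this namespace's private state */
		INIT_LIST_HEAD(&net->foo.entries);
		spin_lock_init(&net->foo.lock);
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* undo foo_net_init() for this namespace */
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_net_init,
		.exit = foo_net_exit,
	};

	/* subsystem init: */	err = register_pernet_subsys(&foo_net_ops);
	/* subsystem exit: */	unregister_pernet_subsys(&foo_net_ops);

register_pernet_subsys() runs .init for every existing namespace and for each
namespace created afterwards; .exit runs when a namespace is torn down, which
is why sctp_net_exit() above releases the address lists and the control socket
that used to be freed in sctp_exit().
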
@@ -1224,62 +1332,9 @@
 	if (!sctp_chunk_cachep)
 		goto err_chunk_cachep;
 
-	/* Allocate and initialise sctp mibs.  */
-	status = init_sctp_mibs();
+	status = percpu_counter_init(&sctp_sockets_allocated, 0);
 	if (status)
-		goto err_init_mibs;
-
-	/* Initialize proc fs directory.  */
-	status = sctp_proc_init();
-	if (status)
-		goto err_init_proc;
-
-	/* Initialize object count debugging.  */
-	sctp_dbg_objcnt_init();
-
-	/*
-	 * 14. Suggested SCTP Protocol Parameter Values
-	 */
-	/* The following protocol parameters are RECOMMENDED:  */
-	/* RTO.Initial              - 3  seconds */
-	sctp_rto_initial		= SCTP_RTO_INITIAL;
-	/* RTO.Min                  - 1  second */
-	sctp_rto_min	 		= SCTP_RTO_MIN;
-	/* RTO.Max                 -  60 seconds */
-	sctp_rto_max 			= SCTP_RTO_MAX;
-	/* RTO.Alpha                - 1/8 */
-	sctp_rto_alpha	        	= SCTP_RTO_ALPHA;
-	/* RTO.Beta                 - 1/4 */
-	sctp_rto_beta			= SCTP_RTO_BETA;
-
-	/* Valid.Cookie.Life        - 60  seconds */
-	sctp_valid_cookie_life		= SCTP_DEFAULT_COOKIE_LIFE;
-
-	/* Whether Cookie Preservative is enabled(1) or not(0) */
-	sctp_cookie_preserve_enable 	= 1;
-
-	/* Max.Burst		    - 4 */
-	sctp_max_burst 			= SCTP_DEFAULT_MAX_BURST;
-
-	/* Association.Max.Retrans  - 10 attempts
-	 * Path.Max.Retrans         - 5  attempts (per destination address)
-	 * Max.Init.Retransmits     - 8  attempts
-	 */
-	sctp_max_retrans_association 	= 10;
-	sctp_max_retrans_path		= 5;
-	sctp_max_retrans_init		= 8;
-
-	/* Sendbuffer growth	    - do per-socket accounting */
-	sctp_sndbuf_policy		= 0;
-
-	/* Rcvbuffer growth	    - do per-socket accounting */
-	sctp_rcvbuf_policy		= 0;
-
-	/* HB.interval              - 30 seconds */
-	sctp_hb_interval		= SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
-
-	/* delayed SACK timeout */
-	sctp_sack_timeout		= SCTP_DEFAULT_TIMEOUT_SACK;
+		goto err_percpu_counter_init;
 
 	/* Implementation specific variables. */
 
@@ -1287,9 +1342,6 @@
 	sctp_max_instreams    		= SCTP_DEFAULT_INSTREAMS;
 	sctp_max_outstreams   		= SCTP_DEFAULT_OUTSTREAMS;
 
-	/* Initialize maximum autoclose timeout. */
-	sctp_max_autoclose		= INT_MAX / HZ;
-
 	/* Initialize handle used for association ids. */
 	idr_init(&sctp_assocs_id);
 
@@ -1376,41 +1428,12 @@
 	pr_info("Hash tables configured (established %d bind %d)\n",
 		sctp_assoc_hashsize, sctp_port_hashsize);
 
-	/* Disable ADDIP by default. */
-	sctp_addip_enable = 0;
-	sctp_addip_noauth = 0;
-	sctp_default_auto_asconf = 0;
-
-	/* Enable PR-SCTP by default. */
-	sctp_prsctp_enable = 1;
-
-	/* Disable AUTH by default. */
-	sctp_auth_enable = 0;
-
-	/* Set SCOPE policy to enabled */
-	sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
-
-	/* Set the default rwnd update threshold */
-	sctp_rwnd_upd_shift		= SCTP_DEFAULT_RWND_SHIFT;
-
 	sctp_sysctl_register();
 
 	INIT_LIST_HEAD(&sctp_address_families);
 	sctp_v4_pf_init();
 	sctp_v6_pf_init();
 
-	/* Initialize the local address list. */
-	INIT_LIST_HEAD(&sctp_local_addr_list);
-	spin_lock_init(&sctp_local_addr_lock);
-	sctp_get_local_addr_list();
-
-	/* Initialize the address event list */
-	INIT_LIST_HEAD(&sctp_addr_waitq);
-	INIT_LIST_HEAD(&sctp_auto_asconf_splist);
-	spin_lock_init(&sctp_addr_wq_lock);
-	sctp_addr_wq_timer.expires = 0;
-	setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
-
 	status = sctp_v4_protosw_init();
 
 	if (status)
@@ -1420,11 +1443,9 @@
 	if (status)
 		goto err_v6_protosw_init;
 
-	/* Initialize the control inode/socket for handling OOTB packets.  */
-	if ((status = sctp_ctl_sock_init())) {
-		pr_err("Failed to initialize the SCTP control sock\n");
-		goto err_ctl_sock_init;
-	}
+	status = register_pernet_subsys(&sctp_net_ops);
+	if (status)
+		goto err_register_pernet_subsys;
 
 	status = sctp_v4_add_protocol();
 	if (status)
@@ -1441,13 +1462,12 @@
 err_v6_add_protocol:
 	sctp_v4_del_protocol();
 err_add_protocol:
-	inet_ctl_sock_destroy(sctp_ctl_sock);
-err_ctl_sock_init:
+	unregister_pernet_subsys(&sctp_net_ops);
+err_register_pernet_subsys:
 	sctp_v6_protosw_exit();
 err_v6_protosw_init:
 	sctp_v4_protosw_exit();
 err_protosw_init:
-	sctp_free_local_addr_list();
 	sctp_v4_pf_exit();
 	sctp_v6_pf_exit();
 	sctp_sysctl_unregister();
@@ -1461,11 +1481,8 @@
 		   get_order(sctp_assoc_hashsize *
 			     sizeof(struct sctp_hashbucket)));
 err_ahash_alloc:
-	sctp_dbg_objcnt_exit();
-	sctp_proc_exit();
-err_init_proc:
-	cleanup_sctp_mibs();
-err_init_mibs:
+	percpu_counter_destroy(&sctp_sockets_allocated);
+err_percpu_counter_init:
 	kmem_cache_destroy(sctp_chunk_cachep);
 err_chunk_cachep:
 	kmem_cache_destroy(sctp_bucket_cachep);
@@ -1482,18 +1499,13 @@
 	/* Unregister with inet6/inet layers. */
 	sctp_v6_del_protocol();
 	sctp_v4_del_protocol();
-	sctp_free_addr_wq();
 
-	/* Free the control endpoint.  */
-	inet_ctl_sock_destroy(sctp_ctl_sock);
+	unregister_pernet_subsys(&sctp_net_ops);
 
 	/* Free protosw registrations */
 	sctp_v6_protosw_exit();
 	sctp_v4_protosw_exit();
 
-	/* Free the local address list.  */
-	sctp_free_local_addr_list();
-
 	/* Unregister with socket layer. */
 	sctp_v6_pf_exit();
 	sctp_v4_pf_exit();
@@ -1508,9 +1520,7 @@
 		   get_order(sctp_port_hashsize *
 			     sizeof(struct sctp_bind_hashbucket)));
 
-	sctp_dbg_objcnt_exit();
-	sctp_proc_exit();
-	cleanup_sctp_mibs();
+	percpu_counter_destroy(&sctp_sockets_allocated);
 
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
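
Note also that sctp_inetaddr_event() no longer bails out for devices outside
init_net; it derives the owning namespace from the interface and updates that
namespace's address list. The generic shape of that change (a sketch only,
with foo_* standing in for the real per-net fields) is:

	static int foo_inetaddr_event(struct notifier_block *this,
				      unsigned long ev, void *ptr)
	{
		struct in_ifaddr *ifa = ptr;
		struct net *net = dev_net(ifa->ifa_dev->dev);

		/* update net-local state instead of a global list */
		spin_lock_bh(&net->foo.lock);
		/* ... add or remove the address for this namespace ... */
		spin_unlock_bh(&net->foo.lock);
		return NOTIFY_DONE;
	}
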
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 479a70e..fbe1636 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -198,6 +198,7 @@
 			     const struct sctp_bind_addr *bp,
 			     gfp_t gfp, int vparam_len)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	sctp_inithdr_t init;
 	union sctp_params addrs;
 	size_t chunksize;
@@ -237,7 +238,7 @@
 	chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
 	chunksize += sizeof(ecap_param);
 
-	if (sctp_prsctp_enable)
+	if (net->sctp.prsctp_enable)
 		chunksize += sizeof(prsctp_param);
 
 	/* ADDIP: Section 4.2.7:
@@ -245,7 +246,7 @@
 	 *  the ASCONF,the ASCONF-ACK, and the AUTH  chunks in its INIT and
 	 *  INIT-ACK parameters.
 	 */
-	if (sctp_addip_enable) {
+	if (net->sctp.addip_enable) {
 		extensions[num_ext] = SCTP_CID_ASCONF;
 		extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
 		num_ext += 2;
@@ -257,7 +258,7 @@
 	chunksize += vparam_len;
 
 	/* Account for AUTH related parameters */
-	if (sctp_auth_enable) {
+	if (net->sctp.auth_enable) {
 		/* Add random parameter length*/
 		chunksize += sizeof(asoc->c.auth_random);
 
@@ -331,7 +332,7 @@
 		sctp_addto_param(retval, num_ext, extensions);
 	}
 
-	if (sctp_prsctp_enable)
+	if (net->sctp.prsctp_enable)
 		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
 
 	if (sp->adaptation_ind) {
@@ -342,7 +343,7 @@
 	}
 
 	/* Add SCTP-AUTH chunks to the parameter list */
-	if (sctp_auth_enable) {
+	if (net->sctp.auth_enable) {
 		sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
 				 asoc->c.auth_random);
 		if (auth_hmacs)
@@ -1940,7 +1941,7 @@
 	return 0;
 }
 
-static int sctp_verify_ext_param(union sctp_params param)
+static int sctp_verify_ext_param(struct net *net, union sctp_params param)
 {
 	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
 	int have_auth = 0;
@@ -1964,10 +1965,10 @@
 	 * only if ADD-IP is turned on and we are not backward-compatible
 	 * mode.
 	 */
-	if (sctp_addip_noauth)
+	if (net->sctp.addip_noauth)
 		return 1;
 
-	if (sctp_addip_enable && !have_auth && have_asconf)
+	if (net->sctp.addip_enable && !have_auth && have_asconf)
 		return 0;
 
 	return 1;
@@ -1976,13 +1977,14 @@
 static void sctp_process_ext_param(struct sctp_association *asoc,
 				    union sctp_params param)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
 	int i;
 
 	for (i = 0; i < num_ext; i++) {
 		switch (param.ext->chunks[i]) {
 		    case SCTP_CID_FWD_TSN:
-			    if (sctp_prsctp_enable &&
+			    if (net->sctp.prsctp_enable &&
 				!asoc->peer.prsctp_capable)
 				    asoc->peer.prsctp_capable = 1;
 			    break;
@@ -1990,12 +1992,12 @@
 			    /* if the peer reports AUTH, assume that he
 			     * supports AUTH.
 			     */
-			    if (sctp_auth_enable)
+			    if (net->sctp.auth_enable)
 				    asoc->peer.auth_capable = 1;
 			    break;
 		    case SCTP_CID_ASCONF:
 		    case SCTP_CID_ASCONF_ACK:
-			    if (sctp_addip_enable)
+			    if (net->sctp.addip_enable)
 				    asoc->peer.asconf_capable = 1;
 			    break;
 		    default:
@@ -2081,7 +2083,8 @@
  *	SCTP_IERROR_ERROR - stop processing, trigger an ERROR
  * 	SCTP_IERROR_NO_ERROR - continue with the chunk
  */
-static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_verify_param(struct net *net,
+					const struct sctp_association *asoc,
 					union sctp_params param,
 					sctp_cid_t cid,
 					struct sctp_chunk *chunk,
@@ -2110,12 +2113,12 @@
 		break;
 
 	case SCTP_PARAM_SUPPORTED_EXT:
-		if (!sctp_verify_ext_param(param))
+		if (!sctp_verify_ext_param(net, param))
 			return SCTP_IERROR_ABORT;
 		break;
 
 	case SCTP_PARAM_SET_PRIMARY:
-		if (sctp_addip_enable)
+		if (net->sctp.addip_enable)
 			break;
 		goto fallthrough;
 
@@ -2126,12 +2129,12 @@
 		break;
 
 	case SCTP_PARAM_FWD_TSN_SUPPORT:
-		if (sctp_prsctp_enable)
+		if (net->sctp.prsctp_enable)
 			break;
 		goto fallthrough;
 
 	case SCTP_PARAM_RANDOM:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fallthrough;
 
 		/* SCTP-AUTH: Section 6.1
@@ -2148,7 +2151,7 @@
 		break;
 
 	case SCTP_PARAM_CHUNKS:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fallthrough;
 
 		/* SCTP-AUTH: Section 3.2
@@ -2164,7 +2167,7 @@
 		break;
 
 	case SCTP_PARAM_HMAC_ALGO:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fallthrough;
 
 		hmacs = (struct sctp_hmac_algo_param *)param.p;
@@ -2198,7 +2201,7 @@
 }
 
 /* Verify the INIT packet before we process it.  */
-int sctp_verify_init(const struct sctp_association *asoc,
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
 		     sctp_cid_t cid,
 		     sctp_init_chunk_t *peer_init,
 		     struct sctp_chunk *chunk,
@@ -2245,7 +2248,7 @@
 	/* Verify all the variable length parameters */
 	sctp_walk_params(param, peer_init, init_hdr.params) {
 
-		result = sctp_verify_param(asoc, param, cid, chunk, errp);
+		result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
 		switch (result) {
 		    case SCTP_IERROR_ABORT:
 		    case SCTP_IERROR_NOMEM:
@@ -2270,6 +2273,7 @@
 		      const union sctp_addr *peer_addr,
 		      sctp_init_chunk_t *peer_init, gfp_t gfp)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	union sctp_params param;
 	struct sctp_transport *transport;
 	struct list_head *pos, *temp;
@@ -2326,7 +2330,7 @@
 	 * also give us an option to silently ignore the packet, which
 	 * is what we'll do here.
 	 */
-	if (!sctp_addip_noauth &&
+	if (!net->sctp.addip_noauth &&
 	     (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
 		asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
 						  SCTP_PARAM_DEL_IP |
@@ -2466,6 +2470,7 @@
 			      const union sctp_addr *peer_addr,
 			      gfp_t gfp)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	union sctp_addr addr;
 	int i;
 	__u16 sat;
@@ -2494,13 +2499,13 @@
 		af = sctp_get_af_specific(param_type2af(param.p->type));
 		af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
 		scope = sctp_scope(peer_addr);
-		if (sctp_in_scope(&addr, scope))
+		if (sctp_in_scope(net, &addr, scope))
 			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
 				return 0;
 		break;
 
 	case SCTP_PARAM_COOKIE_PRESERVATIVE:
-		if (!sctp_cookie_preserve_enable)
+		if (!net->sctp.cookie_preserve_enable)
 			break;
 
 		stale = ntohl(param.life->lifespan_increment);
@@ -2580,7 +2585,7 @@
 		break;
 
 	case SCTP_PARAM_SET_PRIMARY:
-		if (!sctp_addip_enable)
+		if (!net->sctp.addip_enable)
 			goto fall_through;
 
 		addr_param = param.v + sizeof(sctp_addip_param_t);
@@ -2607,7 +2612,7 @@
 		break;
 
 	case SCTP_PARAM_FWD_TSN_SUPPORT:
-		if (sctp_prsctp_enable) {
+		if (net->sctp.prsctp_enable) {
 			asoc->peer.prsctp_capable = 1;
 			break;
 		}
@@ -2615,7 +2620,7 @@
 		goto fall_through;
 
 	case SCTP_PARAM_RANDOM:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fall_through;
 
 		/* Save peer's random parameter */
@@ -2628,7 +2633,7 @@
 		break;
 
 	case SCTP_PARAM_HMAC_ALGO:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fall_through;
 
 		/* Save peer's HMAC list */
@@ -2644,7 +2649,7 @@
 		break;
 
 	case SCTP_PARAM_CHUNKS:
-		if (!sctp_auth_enable)
+		if (!net->sctp.auth_enable)
 			goto fall_through;
 
 		asoc->peer.peer_chunks = kmemdup(param.p,
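
Every change in sm_make_chunk.c follows one pattern: a former global tunable
(sctp_auth_enable, sctp_addip_enable, sctp_prsctp_enable, ...) is now read from
the namespace that owns the association's socket. Schematically (an
illustrative helper, not something this patch adds):

	static bool asoc_auth_enabled(const struct sctp_association *asoc)
	{
		struct net *net = sock_net(asoc->base.sk);

		return net->sctp.auth_enable;	/* was the global sctp_auth_enable */
	}
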
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fe99628..bcfebb9 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -251,6 +251,7 @@
 	int error;
 	struct sctp_transport *transport = (struct sctp_transport *) peer;
 	struct sctp_association *asoc = transport->asoc;
+	struct net *net = sock_net(asoc->base.sk);
 
 	/* Check whether a task is in the sock.  */
 
@@ -271,7 +272,7 @@
 		goto out_unlock;
 
 	/* Run through the state machine.  */
-	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
 			   asoc->state,
 			   asoc->ep, asoc,
@@ -291,6 +292,7 @@
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
 					sctp_event_timeout_t timeout_type)
 {
+	struct net *net = sock_net(asoc->base.sk);
 	int error = 0;
 
 	sctp_bh_lock_sock(asoc->base.sk);
@@ -312,7 +314,7 @@
 		goto out_unlock;
 
 	/* Run through the state machine.  */
-	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(timeout_type),
 			   asoc->state, asoc->ep, asoc,
 			   (void *)timeout_type, GFP_ATOMIC);
@@ -371,6 +373,7 @@
 	int error = 0;
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
+	struct net *net = sock_net(asoc->base.sk);
 
 	sctp_bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
@@ -388,7 +391,7 @@
 	if (transport->dead)
 		goto out_unlock;
 
-	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 			   asoc->state, asoc->ep, asoc,
 			   transport, GFP_ATOMIC);
@@ -408,6 +411,7 @@
 {
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
+	struct net *net = sock_net(asoc->base.sk);
 	
 	sctp_bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
@@ -426,7 +430,7 @@
 	if (asoc->base.dead)
 		goto out_unlock;
 
-	sctp_do_sm(SCTP_EVENT_T_OTHER,
+	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
@@ -753,8 +757,10 @@
 	int err = 0;
 
 	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
+		struct net *net = sock_net(asoc->base.sk);
+
 		/* There are no more TSNs awaiting SACK.  */
-		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
+		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
 				 asoc->state, asoc->ep, asoc, NULL,
 				 GFP_ATOMIC);
@@ -1042,6 +1048,8 @@
  */
 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 {
+	struct net *net = sock_net(asoc->base.sk);
+
 	/* Send the next asconf chunk from the addip chunk
 	 * queue.
 	 */
@@ -1053,7 +1061,7 @@
 
 		/* Hold the chunk until an ASCONF_ACK is received. */
 		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(asoc, asconf))
+		if (sctp_primitive_ASCONF(net, asoc, asconf))
 			sctp_chunk_free(asconf);
 		else
 			asoc->addip_last_asconf = asconf;
@@ -1089,7 +1097,7 @@
  * If you want to understand all of lksctp, this is a
  * good place to start.
  */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
 	       sctp_state_t state,
 	       struct sctp_endpoint *ep,
 	       struct sctp_association *asoc,
@@ -1110,12 +1118,12 @@
 	/* Look up the state function, run it, and then process the
 	 * side effects.  These three steps are the heart of lksctp.
 	 */
-	state_fn = sctp_sm_lookup_event(event_type, state, subtype);
+	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
 
 	sctp_init_cmd_seq(&commands);
 
 	DEBUG_PRE;
-	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
+	status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
 	DEBUG_POST;
 
 	error = sctp_side_effects(event_type, subtype, state,
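
The side-effect code shows the other half of the plumbing: every timer and
event handler must recover the right struct net before calling sctp_do_sm(),
either from the association's socket (as in the hunks above) or, for the
per-namespace address work queue in protocol.c, from the timer argument
itself. The generic shape of the latter, for the pre-timer_setup() timer API
in use here (foo_* names are illustrative):

	static void foo_timeout(unsigned long data)
	{
		struct net *net = (struct net *)data;

		/* act on net-local state, under net-local locks */
	}

	setup_timer(&net->foo.timer, foo_timeout, (unsigned long)net);
	mod_timer(&net->foo.timer, jiffies + msecs_to_jiffies(100));
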
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9fca103..094813b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -66,7 +66,8 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+				  const struct sctp_endpoint *ep,
 				  const struct sctp_association *asoc,
 				  struct sctp_chunk *chunk,
 				  const void *payload,
@@ -74,36 +75,43 @@
 static int sctp_eat_data(const struct sctp_association *asoc,
 			 struct sctp_chunk *chunk,
 			 sctp_cmd_seq_t *commands);
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+					     const struct sctp_association *asoc,
 					     const struct sctp_chunk *chunk);
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const struct sctp_chunk *chunk,
 				       sctp_cmd_seq_t *commands,
 				       struct sctp_chunk *err_chunk);
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+						 const struct sctp_endpoint *ep,
 						 const struct sctp_association *asoc,
 						 const sctp_subtype_t type,
 						 void *arg,
 						 sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+					     const struct sctp_endpoint *ep,
 					     const struct sctp_association *asoc,
 					     const sctp_subtype_t type,
 					     void *arg,
 					     sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
 					sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+					   sctp_cmd_seq_t *commands,
 					   __be16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     void *arg,
@@ -112,6 +120,7 @@
 				     const size_t paylen);
 
 static sctp_disposition_t sctp_sf_violation_chunklen(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -119,6 +128,7 @@
 				     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_paramlen(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -126,6 +136,7 @@
 				     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_ctsn(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -133,18 +144,21 @@
 				     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_chunk(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
 				     void *arg,
 				     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk);
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -204,7 +218,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_4_C(struct net *net,
+				  const struct sctp_endpoint *ep,
 				  const struct sctp_association *asoc,
 				  const sctp_subtype_t type,
 				  void *arg,
@@ -214,7 +229,7 @@
 	struct sctp_ulpevent *ev;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* RFC 2960 6.10 Bundling
 	 *
@@ -222,11 +237,11 @@
 	 * SHUTDOWN COMPLETE with any other chunks.
 	 */
 	if (!chunk->singleton)
-		return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* RFC 2960 10.2 SCTP-to-ULP
@@ -259,8 +274,8 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
-	SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -289,7 +304,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -313,21 +329,21 @@
 	 * with an INIT chunk that is bundled with other chunks.
 	 */
 	if (!chunk->singleton)
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
 	 */
-	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-		SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+	if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+		SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 	}
 
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */
 	if (chunk->sctp_hdr->vtag != 0)
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the INIT chunk has a valid length.
 	 * Normally, this would cause an ABORT with a Protocol Violation
@@ -335,7 +351,7 @@
 	 * just discard the packet.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* If the INIT is coming toward a closing socket, we'll send back
 	 * an ABORT.  Essentially, this catches the race of INIT being
@@ -344,18 +360,18 @@
 	 * can treat this OOTB
 	 */
 	if (sctp_sstate(ep->base.sk, CLOSING))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
-	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+	if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
 			      (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
 			      &err_chunk)) {
 		/* This chunk contains fatal error. It is to be discarded.
 		 * Send an ABORT, with causes if there is any.
 		 */
 		if (err_chunk) {
-			packet = sctp_abort_pkt_new(ep, asoc, arg,
+			packet = sctp_abort_pkt_new(net, ep, asoc, arg,
 					(__u8 *)(err_chunk->chunk_hdr) +
 					sizeof(sctp_chunkhdr_t),
 					ntohs(err_chunk->chunk_hdr->length) -
@@ -366,13 +382,13 @@
 			if (packet) {
 				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 						SCTP_PACKET(packet));
-				SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+				SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 				return SCTP_DISPOSITION_CONSUME;
 			} else {
 				return SCTP_DISPOSITION_NOMEM;
 			}
 		} else {
-			return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+			return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
 						    commands);
 		}
 	}
@@ -484,7 +500,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type,
 				       void *arg,
@@ -496,25 +513,25 @@
 	struct sctp_packet *packet;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* 6.10 Bundling
 	 * An endpoint MUST NOT bundle INIT, INIT ACK or
 	 * SHUTDOWN COMPLETE with any other chunks.
 	 */
 	if (!chunk->singleton)
-		return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the INIT-ACK chunk has a valid length */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 	/* Grab the INIT header.  */
 	chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
-	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+	if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
 			      (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
 			      &err_chunk)) {
 
@@ -526,7 +543,7 @@
 		 * the association.
 		 */
 		if (err_chunk) {
-			packet = sctp_abort_pkt_new(ep, asoc, arg,
+			packet = sctp_abort_pkt_new(net, ep, asoc, arg,
 					(__u8 *)(err_chunk->chunk_hdr) +
 					sizeof(sctp_chunkhdr_t),
 					ntohs(err_chunk->chunk_hdr->length) -
@@ -537,7 +554,7 @@
 			if (packet) {
 				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 						SCTP_PACKET(packet));
-				SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+				SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 				error = SCTP_ERROR_INV_PARAM;
 			}
 		}
@@ -554,10 +571,10 @@
 		 * was malformed.
 		 */
 		if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
 						asoc, chunk->transport);
 	}
 
@@ -633,7 +650,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+				      const struct sctp_endpoint *ep,
 				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type, void *arg,
 				      sctp_cmd_seq_t *commands)
@@ -650,9 +668,9 @@
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
 	 */
-	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-		SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+	if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+		SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 	}
 
 	/* Make sure that the COOKIE_ECHO chunk has a valid length.
@@ -661,7 +679,7 @@
 	 * in sctp_unpack_cookie().
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* If the endpoint is not listening or if the number of associations
 	 * on the TCP-style socket exceed the max backlog, respond with an
@@ -670,7 +688,7 @@
 	sk = ep->base.sk;
 	if (!sctp_sstate(sk, LISTENING) ||
 	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
@@ -703,13 +721,13 @@
 			goto nomem;
 
 		case -SCTP_IERROR_STALE_COOKIE:
-			sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+			sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
 						   err_chk_p);
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 		case -SCTP_IERROR_BAD_SIG:
 		default:
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		}
 	}
 
@@ -756,14 +774,14 @@
 		skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
 		auth.transport = chunk->transport;
 
-		ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
 
 		/* We can now safely free the auth_chunk clone */
 		kfree_skb(chunk->auth_chunk);
 
 		if (ret != SCTP_IERROR_NO_ERROR) {
 			sctp_association_free(new_asoc);
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		}
 	}
 
@@ -804,8 +822,8 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
-	SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-	SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+	SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
 	if (new_asoc->autoclose)
@@ -856,7 +874,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
+				      const struct sctp_endpoint *ep,
 				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type, void *arg,
 				      sctp_cmd_seq_t *commands)
@@ -865,13 +884,13 @@
 	struct sctp_ulpevent *ev;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Verify that the chunk length for the COOKIE-ACK is OK.
 	 * If we don't do this, any bundled chunks may be junked.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Reset init error count upon receipt of COOKIE-ACK,
@@ -892,8 +911,8 @@
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
-	SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-	SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+	SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 	if (asoc->autoclose)
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -958,7 +977,8 @@
 }
 
 /* Generate a HEARTBEAT packet on the given transport.  */
-sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -972,8 +992,8 @@
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_NO_ERROR));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -1028,7 +1048,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
@@ -1039,11 +1060,11 @@
 	size_t paylen = 0;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the HEARTBEAT chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* 8.3 The receiver of the HEARTBEAT should immediately
@@ -1095,7 +1116,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -1108,12 +1130,12 @@
 	unsigned long max_interval;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
 					    sizeof(sctp_sender_hb_info_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
@@ -1171,7 +1193,7 @@
 /* Helper function to send out an abort for the restart
  * condition.
  */
-static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
 				      struct sctp_chunk *init,
 				      sctp_cmd_seq_t *commands)
 {
@@ -1197,18 +1219,18 @@
 	errhdr->length = htons(len);
 
 	/* Assign to the control socket. */
-	ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+	ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 	/* Association is NULL since this may be a restart attack and we
 	 * want to send back the attacker's vtag.
 	 */
-	pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
+	pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
 
 	if (!pkt)
 		goto out;
 	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
 
-	SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
 	/* Discard the rest of the inbound packet. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1240,6 +1262,7 @@
 				       struct sctp_chunk *init,
 				       sctp_cmd_seq_t *commands)
 {
+	struct net *net = sock_net(new_asoc->base.sk);
 	struct sctp_transport *new_addr;
 	int ret = 1;
 
@@ -1258,7 +1281,7 @@
 			    transports) {
 		if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
 					&new_addr->ipaddr)) {
-			sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+			sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
 						   commands);
 			ret = 0;
 			break;
@@ -1358,6 +1381,7 @@
  * chunk handling.
  */
 static sctp_disposition_t sctp_sf_do_unexpected_init(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -1382,20 +1406,20 @@
 	 * with an INIT chunk that is bundled with other chunks.
 	 */
 	if (!chunk->singleton)
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */
 	if (chunk->sctp_hdr->vtag != 0)
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the INIT chunk has a valid length.
 	 * In this case, we generate a protocol violation since we have
 	 * an association established.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 	/* Grab the INIT header.  */
 	chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -1405,14 +1429,14 @@
 
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
-	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+	if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
 			      (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
 			      &err_chunk)) {
 		/* This chunk contains fatal error. It is to be discarded.
 		 * Send an ABORT, with causes if there is any.
 		 */
 		if (err_chunk) {
-			packet = sctp_abort_pkt_new(ep, asoc, arg,
+			packet = sctp_abort_pkt_new(net, ep, asoc, arg,
 					(__u8 *)(err_chunk->chunk_hdr) +
 					sizeof(sctp_chunkhdr_t),
 					ntohs(err_chunk->chunk_hdr->length) -
@@ -1421,14 +1445,14 @@
 			if (packet) {
 				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 						SCTP_PACKET(packet));
-				SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+				SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 				retval = SCTP_DISPOSITION_CONSUME;
 			} else {
 				retval = SCTP_DISPOSITION_NOMEM;
 			}
 			goto cleanup;
 		} else {
-			return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+			return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
 						    commands);
 		}
 	}
@@ -1570,7 +1594,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
@@ -1579,7 +1604,7 @@
 	/* Call helper to do the real work for both simultaneous and
 	 * duplicate INIT chunk handling.
 	 */
-	return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+	return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -1623,7 +1648,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -1632,7 +1658,7 @@
 	/* Call helper to do the real work for both simultaneous and
 	 * duplicate INIT chunk handling.
 	 */
-	return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+	return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 
@@ -1645,7 +1671,8 @@
  * An unexpected INIT ACK usually indicates the processing of an old or
  * duplicated INIT chunk.
 */
-sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
+					    const struct sctp_endpoint *ep,
 					    const struct sctp_association *asoc,
 					    const sctp_subtype_t type,
 					    void *arg, sctp_cmd_seq_t *commands)
@@ -1653,10 +1680,10 @@
 	/* Per the above section, we'll discard the chunk if we have an
 	 * endpoint.  If this is an OOTB INIT-ACK, treat it as such.
 	 */
-	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-		return sctp_sf_ootb(ep, asoc, type, arg, commands);
+	if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+		return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
 	else
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 }
 
 /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
@@ -1664,7 +1691,8 @@
  * Section 5.2.4
  *  A)  In this case, the peer may have restarted.
  */
-static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					struct sctp_chunk *chunk,
 					sctp_cmd_seq_t *commands,
@@ -1700,7 +1728,7 @@
 	 * its peer.
 	 */
 	if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
-		disposition = sctp_sf_do_9_2_reshutack(ep, asoc,
+		disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
 				SCTP_ST_CHUNK(chunk->chunk_hdr->type),
 				chunk, commands);
 		if (SCTP_DISPOSITION_NOMEM == disposition)
@@ -1763,7 +1791,8 @@
  *      after responding to the local endpoint's INIT
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					struct sctp_chunk *chunk,
 					sctp_cmd_seq_t *commands,
@@ -1784,7 +1813,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
-	SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
 	repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1833,7 +1862,8 @@
  *     but a new tag of its own.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					struct sctp_chunk *chunk,
 					sctp_cmd_seq_t *commands,
@@ -1854,7 +1884,8 @@
  *    enter the ESTABLISHED state, if it has not already done so.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					struct sctp_chunk *chunk,
 					sctp_cmd_seq_t *commands,
@@ -1876,7 +1907,7 @@
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
 		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 				SCTP_STATE(SCTP_STATE_ESTABLISHED));
-		SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
 		sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
 				SCTP_NULL());
 
@@ -1948,7 +1979,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -1967,7 +1999,7 @@
 	 * done later.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* "Decode" the chunk.  We have no optional parameters so we
@@ -2001,12 +2033,12 @@
 			goto nomem;
 
 		case -SCTP_IERROR_STALE_COOKIE:
-			sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+			sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
 						   err_chk_p);
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		case -SCTP_IERROR_BAD_SIG:
 		default:
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		}
 	}
 
@@ -2017,27 +2049,27 @@
 
 	switch (action) {
 	case 'A': /* Association restart. */
-		retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands,
+		retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
 					      new_asoc);
 		break;
 
 	case 'B': /* Collision case B. */
-		retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands,
+		retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
 					      new_asoc);
 		break;
 
 	case 'C': /* Collision case C. */
-		retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands,
+		retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
 					      new_asoc);
 		break;
 
 	case 'D': /* Collision case D. */
-		retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands,
+		retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
 					      new_asoc);
 		break;
 
 	default: /* Discard packet for all others. */
-		retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		break;
 	}
 
@@ -2063,6 +2095,7 @@
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_pending_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -2072,7 +2105,7 @@
 	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ABORT chunk has a valid length.
 	 * Since this is an ABORT chunk, we have to discard it
@@ -2085,7 +2118,7 @@
 	 * packet.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* ADD-IP: Special case for ABORT chunks
 	 * F4)  One special consideration is that ABORT Chunks arriving
@@ -2094,9 +2127,9 @@
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-	return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2104,7 +2137,8 @@
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -2113,7 +2147,7 @@
 	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ABORT chunk has a valid length.
 	 * Since this is an ABORT chunk, we have to discard it
@@ -2126,7 +2160,7 @@
 	 * packet.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* ADD-IP: Special case for ABORT chunks
 	 * F4)  One special consideration is that ABORT Chunks arriving
@@ -2135,7 +2169,7 @@
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Stop the T2-shutdown timer. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2145,7 +2179,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-	return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2154,6 +2188,7 @@
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -2163,7 +2198,7 @@
 	/* The same T2 timer, so we should be able to use
 	 * common function with the SHUTDOWN-SENT state.
 	 */
-	return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2180,7 +2215,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -2190,13 +2226,13 @@
 	sctp_errhdr_t *err;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ERROR chunk has a valid length.
 	 * The parameter walking depends on this as well.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Process the error here */
@@ -2206,7 +2242,7 @@
 	 */
 	sctp_walk_errors(err, chunk->chunk_hdr) {
 		if (SCTP_ERROR_STALE_COOKIE == err->cause)
-			return sctp_sf_do_5_2_6_stale(ep, asoc, type,
+			return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
 							arg, commands);
 	}
 
@@ -2215,7 +2251,7 @@
 	 * we are discarding the packet, there should be no adverse
 	 * effects.
 	 */
-	return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+	return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2243,7 +2279,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+						 const struct sctp_endpoint *ep,
 						 const struct sctp_association *asoc,
 						 const sctp_subtype_t type,
 						 void *arg,
@@ -2365,7 +2402,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -2374,7 +2412,7 @@
 	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ABORT chunk has a valid length.
 	 * Since this is an ABORT chunk, we have to discard it
@@ -2387,7 +2425,7 @@
 	 * packet.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* ADD-IP: Special case for ABORT chunks
 	 * F4)  One special consideration is that ABORT Chunks arriving
@@ -2396,12 +2434,13 @@
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-	return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -2418,7 +2457,7 @@
 		sctp_errhdr_t *err;
 		sctp_walk_errors(err, chunk->chunk_hdr);
 		if ((void *)err != (void *)chunk->chunk_end)
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 	}
@@ -2426,8 +2465,8 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
 	/* ASSOC_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
 	return SCTP_DISPOSITION_ABORT;
 }
@@ -2437,7 +2476,8 @@
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
+				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
 				     void *arg,
@@ -2448,7 +2488,7 @@
 	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ABORT chunk has a valid length.
 	 * Since this is an ABORT chunk, we have to discard it
@@ -2461,27 +2501,28 @@
 	 * packet.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* See if we have an error cause code in the chunk.  */
 	len = ntohs(chunk->chunk_hdr->length);
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-	return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+	return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
 				      chunk->transport);
 }
 
 /*
  * Process an incoming ICMP as an ABORT.  (COOKIE-WAIT state)
  */
-sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+	return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
 				      ENOPROTOOPT, asoc,
 				      (struct sctp_transport *)arg);
 }
@@ -2489,7 +2530,8 @@
 /*
  * Process an ABORT.  (COOKIE-ECHOED state)
  */
-sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
+					       const struct sctp_endpoint *ep,
 					       const struct sctp_association *asoc,
 					       const sctp_subtype_t type,
 					       void *arg,
@@ -2498,7 +2540,7 @@
 	/* There is a single T1 timer, so we should be able to use
 	 * common function with the COOKIE-WAIT state.
 	 */
-	return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2506,7 +2548,8 @@
  *
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+					   sctp_cmd_seq_t *commands,
 					   __be16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport)
@@ -2514,7 +2557,7 @@
 	SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
@@ -2557,7 +2600,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -2570,12 +2614,12 @@
 	__u32 ctsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the SHUTDOWN chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk,
 				      sizeof(struct sctp_shutdown_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Convert the elaborate header.  */
@@ -2595,7 +2639,7 @@
 	 * sender with an ABORT.
 	 */
 	if (!TSN_lt(ctsn, asoc->next_tsn))
-		return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
 	/* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
 	 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
@@ -2619,7 +2663,7 @@
 	disposition = SCTP_DISPOSITION_CONSUME;
 
 	if (sctp_outq_is_empty(&asoc->outqueue)) {
-		disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
+		disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
 							  arg, commands);
 	}
 
@@ -2645,7 +2689,8 @@
  * The Cumulative TSN Ack of the received SHUTDOWN chunk
  * MUST be processed.
  */
-sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -2656,12 +2701,12 @@
 	__u32 ctsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the SHUTDOWN chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk,
 				      sizeof(struct sctp_shutdown_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
@@ -2678,7 +2723,7 @@
 	 * sender with an ABORT.
 	 */
 	if (!TSN_lt(ctsn, asoc->next_tsn))
-		return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
 	/* verify, by checking the Cumulative TSN Ack field of the
 	 * chunk, that all its outstanding DATA chunks have been
@@ -2697,7 +2742,8 @@
  * that belong to this association, it should discard the INIT chunk and
  * retransmit the SHUTDOWN ACK chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
@@ -2708,7 +2754,7 @@
 
 	/* Make sure that the chunk has a valid length */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Since we are not going to really process this INIT, there
@@ -2760,7 +2806,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
+				      const struct sctp_endpoint *ep,
 				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type,
 				      void *arg,
@@ -2771,10 +2818,10 @@
 	u32 lowest_tsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	cwr = (sctp_cwrhdr_t *) chunk->skb->data;
@@ -2815,7 +2862,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecne(struct net *net,
+				   const struct sctp_endpoint *ep,
 				   const struct sctp_association *asoc,
 				   const sctp_subtype_t type,
 				   void *arg,
@@ -2825,10 +2873,10 @@
 	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	ecne = (sctp_ecnehdr_t *) chunk->skb->data;
@@ -2871,7 +2919,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -2884,11 +2933,11 @@
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	error = sctp_eat_data(asoc, chunk, commands );
@@ -2897,16 +2946,16 @@
 		break;
 	case SCTP_IERROR_HIGH_TSN:
 	case SCTP_IERROR_BAD_STREAM:
-		SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+		SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
 		goto discard_noforce;
 	case SCTP_IERROR_DUP_TSN:
 	case SCTP_IERROR_IGNORE_TSN:
-		SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+		SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
 		goto discard_force;
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
 	case SCTP_IERROR_PROTO_VIOLATION:
-		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
@@ -2992,7 +3041,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
+				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
 				     void *arg,
@@ -3004,11 +3054,11 @@
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	error = sctp_eat_data(asoc, chunk, commands );
@@ -3022,7 +3072,7 @@
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
 	case SCTP_IERROR_PROTO_VIOLATION:
-		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
@@ -3082,7 +3132,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -3093,18 +3144,18 @@
 	__u32 ctsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the SACK chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Pull the SACK chunk from the data buffer */
 	sackh = sctp_sm_pull_sack(chunk);
 	/* Was this a bogus SACK? */
 	if (!sackh)
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	chunk->subh.sack_hdr = sackh;
 	ctsn = ntohl(sackh->cum_tsn_ack);
 
@@ -3125,7 +3176,7 @@
 	 * sender with an ABORT.
 	 */
 	if (!TSN_lt(ctsn, asoc->next_tsn))
-		return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+		return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
 	/* Return this SACK for further processing.  */
 	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3154,7 +3205,8 @@
  *
  * The return value is the disposition of the chunk.
 */
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -3164,7 +3216,7 @@
 	struct sctp_chunk *chunk = arg;
 	struct sctp_chunk *abort;
 
-	packet = sctp_ootb_pkt_new(asoc, chunk);
+	packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
 	if (packet) {
 		/* Make an ABORT. The T bit will be set if the asoc
@@ -3188,9 +3240,9 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 				SCTP_PACKET(packet));
 
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-		sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		return SCTP_DISPOSITION_CONSUME;
 	}
 
@@ -3205,7 +3257,8 @@
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_operr_notify(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -3215,15 +3268,15 @@
 	sctp_errhdr_t *err;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ERROR chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 	sctp_walk_errors(err, chunk->chunk_hdr);
 	if ((void *)err != (void *)chunk->chunk_end)
-		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
 						  (void *)err, commands);
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
@@ -3242,7 +3295,8 @@
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -3253,11 +3307,11 @@
 	struct sctp_ulpevent *ev;
 
 	if (!sctp_vtag_verify(chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 	/* 10.2 H) SHUTDOWN COMPLETE notification
 	 *
@@ -3290,8 +3344,8 @@
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
-	SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 
 	/* ...and remove all record of the association. */
@@ -3324,7 +3378,8 @@
  *    receiver of the OOTB packet shall discard the OOTB packet and take
  *    no further action.
  */
-sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ootb(struct net *net,
+				const struct sctp_endpoint *ep,
 				const struct sctp_association *asoc,
 				const sctp_subtype_t type,
 				void *arg,
@@ -3338,13 +3393,13 @@
 	int ootb_shut_ack = 0;
 	int ootb_cookie_ack = 0;
 
-	SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+	SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
 	ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
 	do {
 		/* Report violation if the chunk is less than minimal */
 		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
-			return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 		/* Now that we know we at least have a chunk header,
@@ -3359,7 +3414,7 @@
 		 *   sending an ABORT of its own.
 		 */
 		if (SCTP_CID_ABORT == ch->type)
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 		/* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
 		 * or a COOKIE ACK the SCTP Packet should be silently
@@ -3381,18 +3436,18 @@
 		/* Report violation if chunk len overflows */
 		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
 		if (ch_end > skb_tail_pointer(skb))
-			return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 		ch = (sctp_chunkhdr_t *) ch_end;
 	} while (ch_end < skb_tail_pointer(skb));
 
 	if (ootb_shut_ack)
-		return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+		return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
 	else if (ootb_cookie_ack)
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	else
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3416,7 +3471,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+					     const struct sctp_endpoint *ep,
 					     const struct sctp_association *asoc,
 					     const sctp_subtype_t type,
 					     void *arg,
@@ -3426,7 +3482,7 @@
 	struct sctp_chunk *chunk = arg;
 	struct sctp_chunk *shut;
 
-	packet = sctp_ootb_pkt_new(asoc, chunk);
+	packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
 	if (packet) {
 		/* Make a SHUTDOWN_COMPLETE.
@@ -3450,19 +3506,19 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 				SCTP_PACKET(packet));
 
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
 		/* If the chunk length is invalid, we don't want to process
 		 * the rest of the packet.
 		 */
 		if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 		/* We need to discard the rest of the packet to prevent
 		 * potential bombing attacks from additional bundled chunks.
 		 * This is documented in SCTP Threats ID.
 		 */
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	return SCTP_DISPOSITION_NOMEM;
@@ -3479,7 +3535,8 @@
  *   chunks. --piggy ]
  *
  */
-sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
+				      const struct sctp_endpoint *ep,
 				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type,
 				      void *arg,
@@ -3489,7 +3546,7 @@
 
 	/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	/* Although we do have an association in this case, it corresponds
@@ -3497,13 +3554,14 @@
 	 * packet and the state function that handles OOTB SHUTDOWN_ACK is
 	 * called with a NULL association.
 	 */
-	SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+	SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
-	return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
+	return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
 }
 
 /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.  */
-sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type, void *arg,
 				     sctp_cmd_seq_t *commands)
@@ -3519,7 +3577,7 @@
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	/* ADD-IP: Section 4.1.1
@@ -3528,12 +3586,12 @@
 	 * is received unauthenticated it MUST be silently discarded as
 	 * described in [I-D.ietf-tsvwg-sctp-auth].
 	 */
-	if (!sctp_addip_noauth && !chunk->auth)
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+	if (!net->sctp.addip_noauth && !chunk->auth)
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ASCONF ADDIP chunk has a valid length.  */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	hdr = (sctp_addiphdr_t *)chunk->skb->data;
@@ -3542,7 +3600,7 @@
 	addr_param = (union sctp_addr_param *)hdr->params;
 	length = ntohs(addr_param->p.length);
 	if (length < sizeof(sctp_paramhdr_t))
-		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
 			   (void *)addr_param, commands);
 
 	/* Verify the ASCONF chunk before processing it. */
@@ -3550,7 +3608,7 @@
 			    (sctp_paramhdr_t *)((void *)addr_param + length),
 			    (void *)chunk->chunk_end,
 			    &err_param))
-		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
 						  (void *)err_param, commands);
 
 	/* ADDIP 5.2 E1) Compare the value of the serial number to the value
@@ -3630,7 +3688,8 @@
  * When building TLV parameters for the ASCONF Chunk that will add or
  * delete IP addresses the D0 to D13 rules should be applied:
  */
-sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+					 const struct sctp_endpoint *ep,
 					 const struct sctp_association *asoc,
 					 const sctp_subtype_t type, void *arg,
 					 sctp_cmd_seq_t *commands)
@@ -3645,7 +3704,7 @@
 	if (!sctp_vtag_verify(asconf_ack, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	/* ADD-IP, Section 4.1.2:
@@ -3654,12 +3713,12 @@
 	 * is received unauthenticated it MUST be silently discarded as
 	 * described in [I-D.ietf-tsvwg-sctp-auth].
 	 */
-	if (!sctp_addip_noauth && !asconf_ack->auth)
-		return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+	if (!net->sctp.addip_noauth && !asconf_ack->auth)
+		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the ADDIP chunk has a valid length.  */
 	if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
@@ -3670,7 +3729,7 @@
 	    (sctp_paramhdr_t *)addip_hdr->params,
 	    (void *)asconf_ack->chunk_end,
 	    &err_param))
-		return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
 			   (void *)err_param, commands);
 
 	if (last_asconf) {
@@ -3705,8 +3764,8 @@
 				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
 	}
 
@@ -3739,8 +3798,8 @@
 				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
 	}
 
@@ -3761,7 +3820,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type,
 				       void *arg,
@@ -3776,12 +3836,12 @@
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	/* Make sure that the FORWARD_TSN chunk has valid length.  */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3828,6 +3888,7 @@
 }
 
 sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -3843,12 +3904,12 @@
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	/* Make sure that the FORWARD_TSN chunk has a valid length.  */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3915,7 +3976,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk)
@@ -3988,7 +4050,8 @@
 	return SCTP_IERROR_NOMEM;
 }
 
-sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_auth(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
@@ -4001,21 +4064,21 @@
 
 	/* Make sure that the peer has AUTH capable */
 	if (!asoc->peer.auth_capable)
-		return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
+		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_vtag_verify(chunk, asoc)) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	/* Make sure that the AUTH chunk has valid length.  */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-	error = sctp_sf_authenticate(ep, asoc, type, chunk);
+	error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
 	switch (error) {
 	case SCTP_IERROR_AUTH_BAD_HMAC:
 		/* Generate the ERROR chunk and discard the rest
@@ -4032,10 +4095,10 @@
 		/* Fall Through */
 	case SCTP_IERROR_AUTH_BAD_KEYID:
 	case SCTP_IERROR_BAD_SIG:
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	case SCTP_IERROR_PROTO_VIOLATION:
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	case SCTP_IERROR_NOMEM:
@@ -4084,7 +4147,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
+				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
 				     void *arg,
@@ -4097,20 +4161,20 @@
 	SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
 
 	if (!sctp_vtag_verify(unk_chunk, asoc))
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the chunk has a valid length.
 	 * Since we don't know the chunk type, we use a general
 	 * chunkhdr structure to make a comparison.
 	 */
 	if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	switch (type.chunk & SCTP_CID_ACTION_MASK) {
 	case SCTP_CID_ACTION_DISCARD:
 		/* Discard the packet.  */
-		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		break;
 	case SCTP_CID_ACTION_DISCARD_ERR:
 		/* Generate an ERROR chunk as response. */
@@ -4125,7 +4189,7 @@
 		}
 
 		/* Discard the packet.  */
-		sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+		sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 		return SCTP_DISPOSITION_CONSUME;
 		break;
 	case SCTP_CID_ACTION_SKIP:
@@ -4167,7 +4231,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
+					 const struct sctp_endpoint *ep,
 					 const struct sctp_association *asoc,
 					 const sctp_subtype_t type,
 					 void *arg,
@@ -4180,7 +4245,7 @@
 	 * chunkhdr structure to make a comparison.
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
@@ -4205,13 +4270,14 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_pdiscard(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
 				    sctp_cmd_seq_t *commands)
 {
-	SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS);
+	SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
 	return SCTP_DISPOSITION_CONSUME;
@@ -4232,7 +4298,8 @@
  * We simply tag the chunk as a violation.  The state machine will log
  * the violation and continue.
  */
-sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_violation(struct net *net,
+				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
 				     void *arg,
@@ -4242,7 +4309,7 @@
 
 	/* Make sure that the chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
 	return SCTP_DISPOSITION_VIOLATION;
@@ -4252,6 +4319,7 @@
  * Common function to handle a protocol violation.
  */
 static sctp_disposition_t sctp_sf_abort_violation(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     void *arg,
@@ -4302,7 +4370,7 @@
 		}
 
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
 		if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
 			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -4316,10 +4384,10 @@
 					SCTP_ERROR(ECONNABORTED));
 			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 					SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-			SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+			SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		}
 	} else {
-		packet = sctp_ootb_pkt_new(asoc, chunk);
+		packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
 		if (!packet)
 			goto nomem_pkt;
@@ -4334,13 +4402,13 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 			SCTP_PACKET(packet));
 
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 	}
 
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-	sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+	sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
 	return SCTP_DISPOSITION_ABORT;
 
 nomem_pkt:
@@ -4369,6 +4437,7 @@
  * Generate an  ABORT chunk and terminate the association.
  */
 static sctp_disposition_t sctp_sf_violation_chunklen(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -4377,7 +4446,7 @@
 {
 	static const char err_str[]="The following chunk had invalid length:";
 
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
 					sizeof(err_str));
 }
 
@@ -4388,6 +4457,7 @@
  * the length is considered as invalid.
  */
 static sctp_disposition_t sctp_sf_violation_paramlen(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -4407,17 +4477,17 @@
 		goto nomem;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-	SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 			SCTP_ERROR(ECONNABORTED));
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-	sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+	sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
 	return SCTP_DISPOSITION_ABORT;
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
@@ -4430,6 +4500,7 @@
  * error code.
  */
 static sctp_disposition_t sctp_sf_violation_ctsn(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -4438,7 +4509,7 @@
 {
 	static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
 					sizeof(err_str));
 }
 
@@ -4449,6 +4520,7 @@
  * on the path and we may not want to continue this communication.
  */
 static sctp_disposition_t sctp_sf_violation_chunk(
+				     struct net *net,
 				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type,
@@ -4458,9 +4530,9 @@
 	static const char err_str[]="The following chunk violates protocol:";
 
 	if (!asoc)
-		return sctp_sf_violation(ep, asoc, type, arg, commands);
+		return sctp_sf_violation(net, ep, asoc, type, arg, commands);
 
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+	return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
 					sizeof(err_str));
 }
 /***************************************************************************
@@ -4523,7 +4595,8 @@
  *
  * The return value is a disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type,
 				       void *arg,
@@ -4634,7 +4707,8 @@
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type,
 				       void *arg,
@@ -4673,6 +4747,7 @@
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4694,7 +4769,7 @@
 
 	disposition = SCTP_DISPOSITION_CONSUME;
 	if (sctp_outq_is_empty(&asoc->outqueue)) {
-		disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+		disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
 							    arg, commands);
 	}
 	return disposition;
@@ -4728,6 +4803,7 @@
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4759,14 +4835,15 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
 	return retval;
 }
 
 /* We tried an illegal operation on an association which is closed.  */
-sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_closed(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -4779,7 +4856,8 @@
 /* We tried an illegal operation on an association which is shutting
  * down.
  */
-sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
+					  const struct sctp_endpoint *ep,
 					  const struct sctp_association *asoc,
 					  const sctp_subtype_t type,
 					  void *arg,
@@ -4805,6 +4883,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4817,7 +4896,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
-	SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -4839,6 +4918,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4847,7 +4927,7 @@
 	/* There is a single T1 timer, so we should be able to use
 	 * common function with the COOKIE-WAIT state.
 	 */
-	return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands);
+	return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4865,6 +4945,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4884,7 +4965,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 	/* Even if we can't send the ABORT due to low memory, delete the
 	 * TCB.  This is a departure from our typical NOMEM handling.
@@ -4914,6 +4995,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4923,7 +5005,7 @@
 	/* There is a single T1 timer, so we should be able to use
 	 * common function with the COOKIE-WAIT state.
 	 */
-	return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4939,6 +5021,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4949,7 +5032,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-	return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4965,6 +5048,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -4979,7 +5063,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-	return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4995,6 +5079,7 @@
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5004,7 +5089,7 @@
 	/* The same T2 timer, so we should be able to use
 	 * common function with the SHUTDOWN-SENT state.
 	 */
-	return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
+	return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -5030,6 +5115,7 @@
  *   association on which a heartbeat should be issued.
  */
 sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
+					struct net *net,
 					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
@@ -5061,7 +5147,8 @@
  * When an endpoint has an ASCONF signaled change to be sent to the
  * remote endpoint it should do A1 to A9
  */
-sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -5082,6 +5169,7 @@
  * The return value is the disposition of the primitive.
  */
 sctp_disposition_t sctp_sf_ignore_primitive(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5103,6 +5191,7 @@
  * retransmit, the stack will immediately send up this notification.
  */
 sctp_disposition_t sctp_sf_do_no_pending_tsn(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5134,6 +5223,7 @@
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5203,6 +5293,7 @@
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5221,11 +5312,11 @@
 	 */
 	if (chunk) {
 		if (!sctp_vtag_verify(chunk, asoc))
-			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 		/* Make sure that the SHUTDOWN chunk has a valid length. */
 		if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
-			return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 							  commands);
 	}
 
@@ -5273,7 +5364,8 @@
  *
  * The return value is the disposition of the event.
  */
-sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ignore_other(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -5298,7 +5390,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -5306,7 +5399,7 @@
 {
 	struct sctp_transport *transport = arg;
 
-	SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
 		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
@@ -5327,8 +5420,8 @@
 			/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 					SCTP_PERR(SCTP_ERROR_NO_ERROR));
-			SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-			SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+			SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+			SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 			return SCTP_DISPOSITION_DELETE_TCB;
 		}
 	}
@@ -5384,13 +5477,14 @@
  * allow. However, an SCTP transmitter MUST NOT be more aggressive than
  * the following algorithms allow.
  */
-sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type,
 				       void *arg,
 				       sctp_cmd_seq_t *commands)
 {
-	SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
 	return SCTP_DISPOSITION_CONSUME;
 }
@@ -5414,7 +5508,8 @@
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -5425,7 +5520,7 @@
 	int attempts = asoc->init_err_counter + 1;
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
-	SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
 
 	if (attempts <= asoc->max_init_attempts) {
 		bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
@@ -5475,7 +5570,8 @@
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -5485,7 +5581,7 @@
 	int attempts = asoc->init_err_counter + 1;
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
-	SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
 
 	if (attempts <= asoc->max_init_attempts) {
 		repl = sctp_make_cookie_echo(asoc, NULL);
@@ -5523,7 +5619,8 @@
  * the T2-Shutdown timer,  giving its peer ample opportunity to transmit
  * all of its queued DATA chunks that have not yet been sent.
  */
-sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -5532,7 +5629,7 @@
 	struct sctp_chunk *reply = NULL;
 
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
-	SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
 
 	((struct sctp_association *)asoc)->shutdown_retries++;
 
@@ -5542,8 +5639,8 @@
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_NO_ERROR));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -5592,6 +5689,7 @@
  * If the T4 RTO timer expires the endpoint should do B1 to B5
  */
 sctp_disposition_t sctp_sf_t4_timer_expire(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5601,7 +5699,7 @@
 	struct sctp_chunk *chunk = asoc->addip_last_asconf;
 	struct sctp_transport *transport = chunk->transport;
 
-	SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
 
 	/* ADDIP 4.1 B1) Increment the error counters and perform path failure
 	 * detection on the appropriate destination address as defined in
@@ -5626,8 +5724,8 @@
 				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_NO_ERROR));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
 	}
 
@@ -5662,7 +5760,8 @@
  * At the expiration of this timer the sender SHOULD abort the association
  * by sending an ABORT chunk.
  */
-sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
+					   const struct sctp_endpoint *ep,
 					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type,
 					   void *arg,
@@ -5671,7 +5770,7 @@
 	struct sctp_chunk *reply = NULL;
 
 	SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
-	SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
 
 	reply = sctp_make_abort(asoc, NULL, 0);
 	if (!reply)
@@ -5683,8 +5782,8 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_PERR(SCTP_ERROR_NO_ERROR));
 
-	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
 	return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
@@ -5697,6 +5796,7 @@
  * the user.  So this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
  */
 sctp_disposition_t sctp_sf_autoclose_timer_expire(
+	struct net *net,
 	const struct sctp_endpoint *ep,
 	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
@@ -5705,7 +5805,7 @@
 {
 	int disposition;
 
-	SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS);
+	SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
 
 	/* From 9.2 Shutdown of an Association
 	 * Upon receipt of the SHUTDOWN primitive from its upper
@@ -5720,7 +5820,7 @@
 
 	disposition = SCTP_DISPOSITION_CONSUME;
 	if (sctp_outq_is_empty(&asoc->outqueue)) {
-		disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+		disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
 							    arg, commands);
 	}
 	return disposition;
@@ -5738,7 +5838,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_not_impl(struct net *net,
+				    const struct sctp_endpoint *ep,
 				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type,
 				    void *arg,
@@ -5755,7 +5856,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_bug(struct net *net,
+			       const struct sctp_endpoint *ep,
 			       const struct sctp_association *asoc,
 			       const sctp_subtype_t type,
 			       void *arg,
@@ -5775,7 +5877,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
+					const struct sctp_endpoint *ep,
 					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
@@ -5817,7 +5920,8 @@
 /* Create an ABORT packet to be sent as a response, with the specified
  * error causes.
  */
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+				  const struct sctp_endpoint *ep,
 				  const struct sctp_association *asoc,
 				  struct sctp_chunk *chunk,
 				  const void *payload,
@@ -5826,7 +5930,7 @@
 	struct sctp_packet *packet;
 	struct sctp_chunk *abort;
 
-	packet = sctp_ootb_pkt_new(asoc, chunk);
+	packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
 	if (packet) {
 		/* Make an ABORT.
@@ -5858,7 +5962,8 @@
 }
 
 /* Allocate a packet for responding in the OOTB conditions.  */
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+					     const struct sctp_association *asoc,
 					     const struct sctp_chunk *chunk)
 {
 	struct sctp_packet *packet;
@@ -5911,7 +6016,7 @@
 	}
 
 	/* Make a transport for the bucket, Eliza... */
-	transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
+	transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
 	if (!transport)
 		goto nomem;
 
@@ -5919,7 +6024,7 @@
 	 * the source address.
 	 */
 	sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
-			     sctp_sk(sctp_get_ctl_sock()));
+			     sctp_sk(net->sctp.ctl_sock));
 
 	packet = sctp_packet_init(&transport->packet, transport, sport, dport);
 	packet = sctp_packet_config(packet, vtag, 0);
@@ -5937,7 +6042,8 @@
 }
 
 /* Send a stale cookie error when an invalid COOKIE ECHO chunk is found  */
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+				       const struct sctp_endpoint *ep,
 				       const struct sctp_association *asoc,
 				       const struct sctp_chunk *chunk,
 				       sctp_cmd_seq_t *commands,
@@ -5946,7 +6052,7 @@
 	struct sctp_packet *packet;
 
 	if (err_chunk) {
-		packet = sctp_ootb_pkt_new(asoc, chunk);
+		packet = sctp_ootb_pkt_new(net, asoc, chunk);
 		if (packet) {
 			struct sctp_signed_cookie *cookie;
 
@@ -5959,7 +6065,7 @@
 			sctp_packet_append_chunk(packet, err_chunk);
 			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 					SCTP_PACKET(packet));
-			SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+			SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 		} else
 			sctp_chunk_free (err_chunk);
 	}
@@ -5979,6 +6085,7 @@
 	__u32 tsn;
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 	u16 ssn;
 	u16 sid;
 	u8 ordered = 0;
@@ -6109,8 +6216,8 @@
 				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_PERR(SCTP_ERROR_NO_DATA));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 		return SCTP_IERROR_NO_DATA;
 	}
 
@@ -6120,9 +6227,9 @@
 	 * if we renege and the chunk arrives again.
 	 */
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-		SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
 	else {
-		SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+		SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
 		ordered = 1;
 	}
 
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 7c211a7..84d98d8 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -59,7 +59,8 @@
 static const sctp_sm_table_entry_t
 timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+							    sctp_cid_t cid,
 							    sctp_state_t state);
 
 
@@ -82,13 +83,14 @@
 	rtn;								\
 })
 
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net,
+						  sctp_event_t event_type,
 						  sctp_state_t state,
 						  sctp_subtype_t event_subtype)
 {
 	switch (event_type) {
 	case SCTP_EVENT_T_CHUNK:
-		return sctp_chunk_event_lookup(event_subtype.chunk, state);
+		return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
 	case SCTP_EVENT_T_TIMEOUT:
 		return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
 				 timeout_event_table);
@@ -906,7 +908,8 @@
 	TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+							    sctp_cid_t cid,
 							    sctp_state_t state)
 {
 	if (state > SCTP_STATE_MAX)
@@ -915,12 +918,12 @@
 	if (cid <= SCTP_CID_BASE_MAX)
 		return &chunk_event_table[cid][state];
 
-	if (sctp_prsctp_enable) {
+	if (net->sctp.prsctp_enable) {
 		if (cid == SCTP_CID_FWD_TSN)
 			return &prsctp_chunk_event_table[0][state];
 	}
 
-	if (sctp_addip_enable) {
+	if (net->sctp.addip_enable) {
 		if (cid == SCTP_CID_ASCONF)
 			return &addip_chunk_event_table[0][state];
 
@@ -928,7 +931,7 @@
 			return &addip_chunk_event_table[1][state];
 	}
 
-	if (sctp_auth_enable) {
+	if (net->sctp.auth_enable) {
 		if (cid == SCTP_CID_AUTH)
 			return &auth_chunk_event_table[0][state];
 	}
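
The hunks above follow one pattern: thread a struct net argument down through the SCTP state machine so that what used to be globals (the MIB counter array, sctp_prsctp_enable, sctp_addip_enable, sctp_auth_enable) become fields of the owning namespace. A minimal standalone sketch of that refactoring, with invented names that only stand in for the kernel structures:

#include <stdio.h>

enum { MIB_ABORTEDS, MIB_MAX };

/* Stands in for struct net: per-namespace statistics and tunables that
 * used to be single globals. */
struct fake_net {
	unsigned long mibs[MIB_MAX];
	int prsctp_enable;
};

/* Before the conversion this touched one global table; afterwards the
 * caller passes its namespace explicitly, like the added net argument. */
static void inc_stats(struct fake_net *net, int field)
{
	net->mibs[field]++;
}

static int fwd_tsn_supported(struct fake_net *net)
{
	/* mirrors the sctp_chunk_event_lookup() change: consult the
	 * namespace's flag instead of the old global sctp_prsctp_enable */
	return net->prsctp_enable;
}

int main(void)
{
	struct fake_net a = { {0}, 1 }, b = { {0}, 0 };

	inc_stats(&a, MIB_ABORTEDS);
	inc_stats(&a, MIB_ABORTEDS);
	inc_stats(&b, MIB_ABORTEDS);

	printf("a: aborts=%lu prsctp=%d   b: aborts=%lu prsctp=%d\n",
	       a.mibs[MIB_ABORTEDS], fwd_tsn_supported(&a),
	       b.mibs[MIB_ABORTEDS], fwd_tsn_supported(&b));
	return 0;
}

Each namespace counts and configures independently, which is the whole point of the conversion.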
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5e25981..d37d24f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -427,6 +427,7 @@
 static int sctp_send_asconf(struct sctp_association *asoc,
 			    struct sctp_chunk *chunk)
 {
+	struct net 	*net = sock_net(asoc->base.sk);
 	int		retval = 0;
 
 	/* If there is an outstanding ASCONF chunk, queue it for later
@@ -439,7 +440,7 @@
 
 	/* Hold the chunk until an ASCONF_ACK is received. */
 	sctp_chunk_hold(chunk);
-	retval = sctp_primitive_ASCONF(asoc, chunk);
+	retval = sctp_primitive_ASCONF(net, asoc, chunk);
 	if (retval)
 		sctp_chunk_free(chunk);
 	else
@@ -515,6 +516,7 @@
 				   struct sockaddr	*addrs,
 				   int 			addrcnt)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock		*sp;
 	struct sctp_endpoint		*ep;
 	struct sctp_association		*asoc;
@@ -529,7 +531,7 @@
 	int 				i;
 	int 				retval = 0;
 
-	if (!sctp_addip_enable)
+	if (!net->sctp.addip_enable)
 		return retval;
 
 	sp = sctp_sk(sk);
@@ -717,6 +719,7 @@
 				   struct sockaddr	*addrs,
 				   int			addrcnt)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock	*sp;
 	struct sctp_endpoint	*ep;
 	struct sctp_association	*asoc;
@@ -732,7 +735,7 @@
 	int			stored = 0;
 
 	chunk = NULL;
-	if (!sctp_addip_enable)
+	if (!net->sctp.addip_enable)
 		return retval;
 
 	sp = sctp_sk(sk);
@@ -1050,6 +1053,7 @@
 			  int addrs_size,
 			  sctp_assoc_t *assoc_id)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock *sp;
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc = NULL;
@@ -1200,7 +1204,7 @@
 			goto out_free;
 	}
 
-	err = sctp_primitive_ASSOCIATE(asoc, NULL);
+	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
 	if (err < 0) {
 		goto out_free;
 	}
@@ -1458,6 +1462,7 @@
  */
 SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
@@ -1499,9 +1504,9 @@
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);
 			if (chunk)
-				sctp_primitive_ABORT(asoc, chunk);
+				sctp_primitive_ABORT(net, asoc, chunk);
 		} else
-			sctp_primitive_SHUTDOWN(asoc, NULL);
+			sctp_primitive_SHUTDOWN(net, asoc, NULL);
 	}
 
 	/* On a TCP-style socket, block for at most linger_time if set. */
@@ -1569,6 +1574,7 @@
 SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 			     struct msghdr *msg, size_t msg_len)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock *sp;
 	struct sctp_endpoint *ep;
 	struct sctp_association *new_asoc=NULL, *asoc=NULL;
@@ -1714,7 +1720,7 @@
 		if (sinfo_flags & SCTP_EOF) {
 			SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
 					  asoc);
-			sctp_primitive_SHUTDOWN(asoc, NULL);
+			sctp_primitive_SHUTDOWN(net, asoc, NULL);
 			err = 0;
 			goto out_unlock;
 		}
@@ -1727,7 +1733,7 @@
 			}
 
 			SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
-			sctp_primitive_ABORT(asoc, chunk);
+			sctp_primitive_ABORT(net, asoc, chunk);
 			err = 0;
 			goto out_unlock;
 		}
@@ -1900,7 +1906,7 @@
 
 	/* Auto-connect, if we aren't connected already. */
 	if (sctp_state(asoc, CLOSED)) {
-		err = sctp_primitive_ASSOCIATE(asoc, NULL);
+		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
 		if (err < 0)
 			goto out_free;
 		SCTP_DEBUG_PRINTK("We associated primitively.\n");
@@ -1928,7 +1934,7 @@
 	 * works that way today.  Keep it that way or this
 	 * breaks.
 	 */
-	err = sctp_primitive_SEND(asoc, datamsg);
+	err = sctp_primitive_SEND(net, asoc, datamsg);
 	/* Did the lower layer accept the chunk? */
 	if (err)
 		sctp_datamsg_free(datamsg);
@@ -2320,7 +2326,9 @@
 	int error;
 
 	if (params->spp_flags & SPP_HB_DEMAND && trans) {
-		error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
+		struct net *net = sock_net(trans->asoc->base.sk);
+
+		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
 		if (error)
 			return error;
 	}
@@ -3033,6 +3041,7 @@
 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
 					     unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_sock	*sp;
 	struct sctp_association	*asoc = NULL;
 	struct sctp_setpeerprim	prim;
@@ -3042,7 +3051,7 @@
 
 	sp = sctp_sk(sk);
 
-	if (!sctp_addip_enable)
+	if (!net->sctp.addip_enable)
 		return -EPERM;
 
 	if (optlen != sizeof(struct sctp_setpeerprim))
@@ -3279,9 +3288,10 @@
 				      char __user *optval,
 				      unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authchunk val;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (optlen != sizeof(struct sctp_authchunk))
@@ -3311,11 +3321,12 @@
 				      char __user *optval,
 				      unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_hmacalgo *hmacs;
 	u32 idents;
 	int err;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (optlen < sizeof(struct sctp_hmacalgo))
@@ -3348,11 +3359,12 @@
 				    char __user *optval,
 				    unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authkey *authkey;
 	struct sctp_association *asoc;
 	int ret;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (optlen <= sizeof(struct sctp_authkey))
@@ -3389,10 +3401,11 @@
 				      char __user *optval,
 				      unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (optlen != sizeof(struct sctp_authkeyid))
@@ -3417,10 +3430,11 @@
 				   char __user *optval,
 				   unsigned int optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (optlen != sizeof(struct sctp_authkeyid))
@@ -3471,7 +3485,7 @@
 		sp->do_auto_asconf = 0;
 	} else if (val && !sp->do_auto_asconf) {
 		list_add_tail(&sp->auto_asconf_list,
-		    &sctp_auto_asconf_splist);
+		    &sock_net(sk)->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
 	}
 	return 0;
@@ -3843,6 +3857,7 @@
  */
 SCTP_STATIC int sctp_init_sock(struct sock *sk)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_endpoint *ep;
 	struct sctp_sock *sp;
 
@@ -3872,7 +3887,7 @@
 	sp->default_timetolive = 0;
 
 	sp->default_rcv_context = 0;
-	sp->max_burst = sctp_max_burst;
+	sp->max_burst = net->sctp.max_burst;
 
 	/* Initialize default setup parameters. These parameters
 	 * can be modified with the SCTP_INITMSG socket option or
@@ -3880,24 +3895,24 @@
 	 */
 	sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
 	sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
-	sp->initmsg.sinit_max_attempts   = sctp_max_retrans_init;
-	sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
+	sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
+	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
 
 	/* Initialize default RTO related parameters.  These parameters can
 	 * be modified for with the SCTP_RTOINFO socket option.
 	 */
-	sp->rtoinfo.srto_initial = sctp_rto_initial;
-	sp->rtoinfo.srto_max     = sctp_rto_max;
-	sp->rtoinfo.srto_min     = sctp_rto_min;
+	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+	sp->rtoinfo.srto_max     = net->sctp.rto_max;
+	sp->rtoinfo.srto_min     = net->sctp.rto_min;
 
 	/* Initialize default association related parameters. These parameters
 	 * can be modified with the SCTP_ASSOCINFO socket option.
 	 */
-	sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
+	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
 	sp->assocparams.sasoc_number_peer_destinations = 0;
 	sp->assocparams.sasoc_peer_rwnd = 0;
 	sp->assocparams.sasoc_local_rwnd = 0;
-	sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
+	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
 
 	/* Initialize default event subscriptions. By default, all the
 	 * options are off.
@@ -3907,10 +3922,10 @@
 	/* Default Peer Address Parameters.  These defaults can
 	 * be modified via SCTP_PEER_ADDR_PARAMS
 	 */
-	sp->hbinterval  = sctp_hb_interval;
-	sp->pathmaxrxt  = sctp_max_retrans_path;
+	sp->hbinterval  = net->sctp.hb_interval;
+	sp->pathmaxrxt  = net->sctp.max_retrans_path;
 	sp->pathmtu     = 0; // allow default discovery
-	sp->sackdelay   = sctp_sack_timeout;
+	sp->sackdelay   = net->sctp.sack_timeout;
 	sp->sackfreq	= 2;
 	sp->param_flags = SPP_HB_ENABLE |
 			  SPP_PMTUD_ENABLE |
@@ -3961,10 +3976,10 @@
 
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
-	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	if (sctp_default_auto_asconf) {
+	sock_prot_inuse_add(net, sk->sk_prot, 1);
+	if (net->sctp.default_auto_asconf) {
 		list_add_tail(&sp->auto_asconf_list,
-		    &sctp_auto_asconf_splist);
+		    &net->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
 	} else
 		sp->do_auto_asconf = 0;
@@ -4011,6 +4026,7 @@
  */
 SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 
@@ -4022,7 +4038,7 @@
 		if (!list_empty(&ep->asocs)) {
 			asoc = list_entry(ep->asocs.next,
 					  struct sctp_association, asocs);
-			sctp_primitive_SHUTDOWN(asoc, NULL);
+			sctp_primitive_SHUTDOWN(net, asoc, NULL);
 		}
 	}
 }
@@ -4653,9 +4669,10 @@
 	union sctp_addr temp;
 	int cnt = 0;
 	int addrlen;
+	struct net *net = sock_net(sk);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
 		if (!addr->valid)
 			continue;
 
@@ -5299,12 +5316,13 @@
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_hmacalgo  __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
 	__u16 data_len = 0;
 	u32 num_idents;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
@@ -5328,10 +5346,11 @@
 static int sctp_getsockopt_active_key(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (len < sizeof(struct sctp_authkeyid))
@@ -5360,6 +5379,7 @@
 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authchunks __user *p = (void __user *)optval;
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
@@ -5367,7 +5387,7 @@
 	u32    num_chunks = 0;
 	char __user *to;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (len < sizeof(struct sctp_authchunks))
@@ -5403,6 +5423,7 @@
 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
+	struct net *net = sock_net(sk);
 	struct sctp_authchunks __user *p = (void __user *)optval;
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
@@ -5410,7 +5431,7 @@
 	u32    num_chunks = 0;
 	char __user *to;
 
-	if (!sctp_auth_enable)
+	if (!net->sctp.auth_enable)
 		return -EACCES;
 
 	if (len < sizeof(struct sctp_authchunks))
@@ -5769,7 +5790,7 @@
  * a fastreuse flag (FIXME: NPI ipg).
  */
 static struct sctp_bind_bucket *sctp_bucket_create(
-	struct sctp_bind_hashbucket *head, unsigned short snum);
+	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
@@ -5799,11 +5820,12 @@
 				rover = low;
 			if (inet_is_reserved_local_port(rover))
 				continue;
-			index = sctp_phashfn(rover);
+			index = sctp_phashfn(sock_net(sk), rover);
 			head = &sctp_port_hashtable[index];
 			sctp_spin_lock(&head->lock);
 			sctp_for_each_hentry(pp, node, &head->chain)
-				if (pp->port == rover)
+				if ((pp->port == rover) &&
+				    net_eq(sock_net(sk), pp->net))
 					goto next;
 			break;
 		next:
@@ -5827,10 +5849,10 @@
 		 * to the port number (snum) - we detect that with the
 		 * port iterator, pp being NULL.
 		 */
-		head = &sctp_port_hashtable[sctp_phashfn(snum)];
+		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
 		sctp_spin_lock(&head->lock);
 		sctp_for_each_hentry(pp, node, &head->chain) {
-			if (pp->port == snum)
+			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
 				goto pp_found;
 		}
 	}
@@ -5881,7 +5903,7 @@
 pp_not_found:
 	/* If there was a hash table miss, create a new port.  */
 	ret = 1;
-	if (!pp && !(pp = sctp_bucket_create(head, snum)))
+	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
 		goto fail_unlock;
 
 	/* In either case (hit or miss), make sure fastreuse is 1 only
@@ -6113,7 +6135,7 @@
  ********************************************************************/
 
 static struct sctp_bind_bucket *sctp_bucket_create(
-	struct sctp_bind_hashbucket *head, unsigned short snum)
+	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
 {
 	struct sctp_bind_bucket *pp;
 
@@ -6123,6 +6145,7 @@
 		pp->port = snum;
 		pp->fastreuse = 0;
 		INIT_HLIST_HEAD(&pp->owner);
+		pp->net = net;
 		hlist_add_head(&pp->node, &head->chain);
 	}
 	return pp;
@@ -6142,7 +6165,8 @@
 static inline void __sctp_put_port(struct sock *sk)
 {
 	struct sctp_bind_hashbucket *head =
-		&sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
+		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+						  inet_sk(sk)->inet_num)];
 	struct sctp_bind_bucket *pp;
 
 	sctp_spin_lock(&head->lock);
@@ -6809,7 +6833,8 @@
 	newsp->hmac = NULL;
 
 	/* Hook this new socket in to the bind_hash list. */
-	head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
+	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+						 inet_sk(oldsk)->inet_num)];
 	sctp_local_bh_disable();
 	sctp_spin_lock(&head->lock);
 	pp = sctp_sk(oldsk)->bind_hash;
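
The socket.c hunks change the port hash so a bind bucket is keyed by (namespace, port): the hash mixes in the net, and a match additionally requires net_eq(). A small standalone sketch of that lookup shape, using invented names rather than the kernel's sctp_bind_bucket:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define HASH_SIZE 256

struct bucket {
	const void    *net;    /* stands in for pp->net  */
	unsigned short port;   /* stands in for pp->port */
	struct bucket *next;
};

static struct bucket *htable[HASH_SIZE];

/* hash on the namespace as well as the port, as sctp_phashfn() does
 * after this patch (the mixing function here is only illustrative) */
static unsigned int phash(const void *net, unsigned short port)
{
	return (unsigned int)(port ^ ((uintptr_t)net >> 4)) & (HASH_SIZE - 1);
}

static struct bucket *lookup(const void *net, unsigned short port)
{
	struct bucket *pp;

	for (pp = htable[phash(net, port)]; pp; pp = pp->next)
		if (pp->port == port && pp->net == net)  /* net_eq() analogue */
			return pp;
	return NULL;
}

static struct bucket *create(const void *net, unsigned short port)
{
	struct bucket *pp = malloc(sizeof(*pp));
	unsigned int   idx = phash(net, port);

	pp->net = net;
	pp->port = port;
	pp->next = htable[idx];
	htable[idx] = pp;
	return pp;
}

int main(void)
{
	int net_a, net_b;   /* any two distinct addresses will do */

	create(&net_a, 9899);
	printf("a:9899 %s, b:9899 %s\n",
	       lookup(&net_a, 9899) ? "found" : "missing",
	       lookup(&net_b, 9899) ? "found" : "missing");
	return 0;
}

Two namespaces can therefore bind the same port without colliding, while lookups within one namespace still see only their own buckets.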
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 2b2bfe9..70e3ba5 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -64,161 +64,6 @@
 
 static ctl_table sctp_table[] = {
 	{
-		.procname	= "rto_initial",
-		.data		= &sctp_rto_initial,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &one,
-		.extra2         = &timer_max
-	},
-	{
-		.procname	= "rto_min",
-		.data		= &sctp_rto_min,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &one,
-		.extra2         = &timer_max
-	},
-	{
-		.procname	= "rto_max",
-		.data		= &sctp_rto_max,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &one,
-		.extra2         = &timer_max
-	},
-	{
-		.procname	= "valid_cookie_life",
-		.data		= &sctp_valid_cookie_life,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &one,
-		.extra2         = &timer_max
-	},
-	{
-		.procname	= "max_burst",
-		.data		= &sctp_max_burst,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &int_max
-	},
-	{
-		.procname	= "association_max_retrans",
-		.data		= &sctp_max_retrans_association,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
-		.extra2		= &int_max
-	},
-	{
-		.procname	= "sndbuf_policy",
-		.data		= &sctp_sndbuf_policy,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "rcvbuf_policy",
-		.data		= &sctp_rcvbuf_policy,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "path_max_retrans",
-		.data		= &sctp_max_retrans_path,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
-		.extra2		= &int_max
-	},
-	{
-		.procname	= "pf_retrans",
-		.data		= &sctp_pf_retrans,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &int_max
-	},
-	{
-		.procname	= "max_init_retransmits",
-		.data		= &sctp_max_retrans_init,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
-		.extra2		= &int_max
-	},
-	{
-		.procname	= "hb_interval",
-		.data		= &sctp_hb_interval,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &one,
-		.extra2         = &timer_max
-	},
-	{
-		.procname	= "cookie_preserve_enable",
-		.data		= &sctp_cookie_preserve_enable,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "rto_alpha_exp_divisor",
-		.data		= &sctp_rto_alpha,
-		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "rto_beta_exp_divisor",
-		.data		= &sctp_rto_beta,
-		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "addip_enable",
-		.data		= &sctp_addip_enable,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "default_auto_asconf",
-		.data		= &sctp_default_auto_asconf,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "prsctp_enable",
-		.data		= &sctp_prsctp_enable,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sack_timeout",
-		.data		= &sctp_sack_timeout,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1         = &sack_timer_min,
-		.extra2         = &sack_timer_max,
-	},
-	{
 		.procname	= "sctp_mem",
 		.data		= &sysctl_sctp_mem,
 		.maxlen		= sizeof(sysctl_sctp_mem),
@@ -239,23 +84,183 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+
+	{ /* sentinel */ }
+};
+
+static ctl_table sctp_net_table[] = {
 	{
-		.procname	= "auth_enable",
-		.data		= &sctp_auth_enable,
+		.procname	= "rto_initial",
+		.data		= &init_net.sctp.rto_initial,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &one,
+		.extra2         = &timer_max
+	},
+	{
+		.procname	= "rto_min",
+		.data		= &init_net.sctp.rto_min,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &one,
+		.extra2         = &timer_max
+	},
+	{
+		.procname	= "rto_max",
+		.data		= &init_net.sctp.rto_max,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &one,
+		.extra2         = &timer_max
+	},
+	{
+		.procname	= "rto_alpha_exp_divisor",
+		.data		= &init_net.sctp.rto_alpha,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "rto_beta_exp_divisor",
+		.data		= &init_net.sctp.rto_beta,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "max_burst",
+		.data		= &init_net.sctp.max_burst,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &int_max
+	},
+	{
+		.procname	= "cookie_preserve_enable",
+		.data		= &init_net.sctp.cookie_preserve_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "valid_cookie_life",
+		.data		= &init_net.sctp.valid_cookie_life,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &one,
+		.extra2         = &timer_max
+	},
+	{
+		.procname	= "sack_timeout",
+		.data		= &init_net.sctp.sack_timeout,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &sack_timer_min,
+		.extra2         = &sack_timer_max,
+	},
+	{
+		.procname	= "hb_interval",
+		.data		= &init_net.sctp.hb_interval,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1         = &one,
+		.extra2         = &timer_max
+	},
+	{
+		.procname	= "association_max_retrans",
+		.data		= &init_net.sctp.max_retrans_association,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &int_max
+	},
+	{
+		.procname	= "path_max_retrans",
+		.data		= &init_net.sctp.max_retrans_path,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &int_max
+	},
+	{
+		.procname	= "max_init_retransmits",
+		.data		= &init_net.sctp.max_retrans_init,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &int_max
+	},
+	{
+		.procname	= "pf_retrans",
+		.data		= &init_net.sctp.pf_retrans,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &int_max
+	},
+	{
+		.procname	= "sndbuf_policy",
+		.data		= &init_net.sctp.sndbuf_policy,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "rcvbuf_policy",
+		.data		= &init_net.sctp.rcvbuf_policy,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "default_auto_asconf",
+		.data		= &init_net.sctp.default_auto_asconf,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "addip_enable",
+		.data		= &init_net.sctp.addip_enable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
 		.procname	= "addip_noauth_enable",
-		.data		= &sctp_addip_noauth,
+		.data		= &init_net.sctp.addip_noauth,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "prsctp_enable",
+		.data		= &init_net.sctp.prsctp_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "auth_enable",
+		.data		= &init_net.sctp.auth_enable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
 		.procname	= "addr_scope_policy",
-		.data		= &sctp_scope_policy,
+		.data		= &init_net.sctp.scope_policy,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
@@ -264,7 +269,7 @@
 	},
 	{
 		.procname	= "rwnd_update_shift",
-		.data		= &sctp_rwnd_upd_shift,
+		.data		= &init_net.sctp.rwnd_upd_shift,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec_minmax,
@@ -273,7 +278,7 @@
 	},
 	{
 		.procname	= "max_autoclose",
-		.data		= &sctp_max_autoclose,
+		.data		= &init_net.sctp.max_autoclose,
 		.maxlen		= sizeof(unsigned long),
 		.mode		= 0644,
 		.proc_handler	= &proc_doulongvec_minmax,
@@ -284,6 +289,27 @@
 	{ /* sentinel */ }
 };
 
+int sctp_sysctl_net_register(struct net *net)
+{
+	struct ctl_table *table;
+	int i;
+
+	table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	for (i = 0; table[i].data; i++)
+		table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
+	net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+	return 0;
+}
+
+void sctp_sysctl_net_unregister(struct net *net)
+{
+	unregister_net_sysctl_table(net->sctp.sysctl_header);
+}
+
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
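
The interesting trick in sctp_sysctl_net_register() above is the pointer rebasing: the template table's .data fields point into init_net, and each per-namespace copy shifts every pointer by the byte offset between the new net's sctp block and init_net's, so the copy ends up addressing the equivalent field of the new namespace. A standalone sketch of that arithmetic, with invented structures in place of the kernel's ctl_table and struct net:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct sctp_cfg { int rto_initial; int rto_min; };
struct fake_net { struct sctp_cfg sctp; };

struct ctl { const char *name; void *data; };

static struct fake_net init_net = { { 3000, 1000 } };

/* template table, written against init_net just like sctp_net_table[] */
static struct ctl tmpl[] = {
	{ "rto_initial", &init_net.sctp.rto_initial },
	{ "rto_min",     &init_net.sctp.rto_min     },
	{ NULL, NULL }                       /* sentinel */
};

static struct ctl *register_for(struct fake_net *net)
{
	struct ctl *t = malloc(sizeof(tmpl));
	int i;

	if (!t)
		return NULL;
	memcpy(t, tmpl, sizeof(tmpl));
	for (i = 0; t[i].data; i++)          /* same loop as in the patch */
		t[i].data = (char *)t[i].data +
			    ((char *)&net->sctp - (char *)&init_net.sctp);
	return t;
}

int main(void)
{
	struct fake_net other = { { 9000, 500 } };
	struct ctl *t = register_for(&other);

	/* the rebased entry now reads the other namespace's value */
	printf("%s = %d\n", t[0].name, *(int *)t[0].data);   /* prints 9000 */
	free(t);
	return 0;
}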
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c97472b..953c21e 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -59,7 +59,8 @@
 /* 1st Level Abstractions.  */
 
 /* Initialize a new transport from provided memory.  */
-static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
+static struct sctp_transport *sctp_transport_init(struct net *net,
+						  struct sctp_transport *peer,
 						  const union sctp_addr *addr,
 						  gfp_t gfp)
 {
@@ -76,7 +77,7 @@
 	 * given destination transport address, set RTO to the protocol
 	 * parameter 'RTO.Initial'.
 	 */
-	peer->rto = msecs_to_jiffies(sctp_rto_initial);
+	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
 	peer->last_time_heard = jiffies;
 	peer->last_time_ecne_reduced = jiffies;
@@ -86,8 +87,8 @@
 			    SPP_SACKDELAY_ENABLE;
 
 	/* Initialize the default path max_retrans.  */
-	peer->pathmaxrxt  = sctp_max_retrans_path;
-	peer->pf_retrans  = sctp_pf_retrans;
+	peer->pathmaxrxt  = net->sctp.max_retrans_path;
+	peer->pf_retrans  = net->sctp.pf_retrans;
 
 	INIT_LIST_HEAD(&peer->transmitted);
 	INIT_LIST_HEAD(&peer->send_ready);
@@ -109,7 +110,8 @@
 }
 
 /* Allocate and initialize a new transport.  */
-struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
+struct sctp_transport *sctp_transport_new(struct net *net,
+					  const union sctp_addr *addr,
 					  gfp_t gfp)
 {
 	struct sctp_transport *transport;
@@ -118,7 +120,7 @@
 	if (!transport)
 		goto fail;
 
-	if (!sctp_transport_init(transport, addr, gfp))
+	if (!sctp_transport_init(net, transport, addr, gfp))
 		goto fail_init;
 
 	transport->malloced = 1;
@@ -316,6 +318,7 @@
 	SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);
 
 	if (tp->rttvar || tp->srtt) {
+		struct net *net = sock_net(tp->asoc->base.sk);
 		/* 6.3.1 C3) When a new RTT measurement R' is made, set
 		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
 		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
@@ -327,10 +330,10 @@
 		 * For example, assuming the default value of RTO.Alpha of
 		 * 1/8, rto_alpha would be expressed as 3.
 		 */
-		tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
-			+ ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
-		tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
-			+ (rtt >> sctp_rto_alpha);
+		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+			+ ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+			+ (rtt >> net->sctp.rto_alpha);
 	} else {
 		/* 6.3.1 C2) When the first RTT measurement R is made, set
 		 * SRTT <- R, RTTVAR <- R/2.
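
The transport.c hunk keeps the RFC 4960 6.3.1 smoothing but reads the divisors from the namespace. rto_alpha and rto_beta are exponent divisors (3 means 1/8, 2 means 1/4, as the sysctl names rto_alpha_exp_divisor / rto_beta_exp_divisor say), so the update is done with right shifts instead of fractional multipliers. A short standalone sketch of the same arithmetic, with made-up sample values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int rto_alpha = 3, rto_beta = 2;  /* 1/8 and 1/4 */
	unsigned int srtt = 0, rttvar = 0;
	unsigned int samples[] = { 100, 120, 80, 110 };
	unsigned int i, rtt;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt = samples[i];
		if (!srtt && !rttvar) {
			/* 6.3.1 C2: first measurement R -> SRTT=R, RTTVAR=R/2 */
			srtt = rtt;
			rttvar = rtt / 2;
		} else {
			/* 6.3.1 C3 in the shift form used by the patch */
			rttvar = rttvar - (rttvar >> rto_beta)
				 + ((unsigned int)abs((int)srtt - (int)rtt) >> rto_beta);
			srtt = srtt - (srtt >> rto_alpha) + (rtt >> rto_alpha);
		}
		printf("R'=%u  SRTT=%u  RTTVAR=%u\n", rtt, srtt, rttvar);
	}
	return 0;
}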
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f5a6a4f..360d869 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -326,7 +326,9 @@
  * payload was fragmented on the way and IP had to reassemble them.
  * We add the rest of the skbs to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+	struct sk_buff_head *queue, struct sk_buff *f_frag,
+	struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sk_buff *new = NULL;
@@ -394,7 +396,7 @@
 	}
 
 	event = sctp_skb2event(f_frag);
-	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 
 	return event;
 }
@@ -493,7 +495,8 @@
 		cevent = sctp_skb2event(pd_first);
 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 		if (pd_point && pd_point <= pd_len) {
-			retval = sctp_make_reassembled_event(&ulpq->reasm,
+			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+							     &ulpq->reasm,
 							     pd_first,
 							     pd_last);
 			if (retval)
@@ -503,7 +506,8 @@
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -563,7 +567,8 @@
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
@@ -655,7 +660,8 @@
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
 
diff --git a/net/socket.c b/net/socket.c
index dfe5b66..a5471f8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2657,6 +2657,7 @@
 	if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
 		return -EFAULT;
 
+	memset(&ifc, 0, sizeof(ifc));
 	if (ifc32.ifcbuf == 0) {
 		ifc32.ifc_len = 0;
 		ifc.ifc_len = 0;
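
The one-line memset() added to compat_dev_ifconf() guards against handing on whatever happened to be on the stack in the members that the partial-initialization path never writes. An illustrative userspace sketch of the same idea, with an invented struct rather than the real compat_ifconf layout:

#include <stdio.h>
#include <string.h>

struct ifconf_like {
	int   ifc_len;
	void *ifc_buf;
	int   spare[4];    /* members the partial-init path never touches */
};

static void fill(struct ifconf_like *ifc, int zero_first)
{
	if (zero_first)
		memset(ifc, 0, sizeof(*ifc));   /* the added line, in spirit */
	ifc->ifc_len = 0;
	ifc->ifc_buf = NULL;
	/* without the memset, spare[] keeps indeterminate stack contents */
}

int main(void)
{
	struct ifconf_like ifc;

	fill(&ifc, 1);
	printf("spare[0] after zeroing first: %d\n", ifc.spare[0]);  /* 0 */
	return 0;
}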
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 09e7124..4ec5c80 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -49,21 +49,6 @@
 static void bearer_disable(struct tipc_bearer *b_ptr);
 
 /**
- * media_name_valid - validate media name
- *
- * Returns 1 if media name is valid, otherwise 0.
- */
-static int media_name_valid(const char *name)
-{
-	u32 len;
-
-	len = strlen(name);
-	if ((len + 1) > TIPC_MAX_MEDIA_NAME)
-		return 0;
-	return strspn(name, tipc_alphabet) == len;
-}
-
-/**
  * tipc_media_find - locates specified media object by name
  */
 struct tipc_media *tipc_media_find(const char *name)
@@ -102,7 +87,7 @@
 
 	write_lock_bh(&tipc_net_lock);
 
-	if (!media_name_valid(m_ptr->name))
+	if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
 		goto exit;
 	if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
 	    !m_ptr->bcast_addr.broadcast)
@@ -206,9 +191,7 @@
 
 	/* validate component parts of bearer name */
 	if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
-	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
-	    (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
-	    (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
 		return 0;
 
 	/* return bearer name components, if necessary */
diff --git a/net/tipc/config.c b/net/tipc/config.c
index a056a38..f67866c 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -208,36 +208,6 @@
 	return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_max_publications(void)
-{
-	u32 value;
-
-	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-	if (value < 1 || value > 65535)
-		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-						   " (max publications must be 1-65535)");
-	tipc_max_publications = value;
-	return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_subscriptions(void)
-{
-	u32 value;
-
-	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-	if (value < 1 || value > 65535)
-		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-						   " (max subscriptions must be 1-65535");
-	tipc_max_subscriptions = value;
-	return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_max_ports(void)
 {
 	u32 value;
@@ -357,12 +327,6 @@
 	case TIPC_CMD_SET_MAX_PORTS:
 		rep_tlv_buf = cfg_set_max_ports();
 		break;
-	case TIPC_CMD_SET_MAX_PUBL:
-		rep_tlv_buf = cfg_set_max_publications();
-		break;
-	case TIPC_CMD_SET_MAX_SUBSCR:
-		rep_tlv_buf = cfg_set_max_subscriptions();
-		break;
 	case TIPC_CMD_SET_NETID:
 		rep_tlv_buf = cfg_set_netid();
 		break;
@@ -372,12 +336,6 @@
 	case TIPC_CMD_GET_MAX_PORTS:
 		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
 		break;
-	case TIPC_CMD_GET_MAX_PUBL:
-		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
-		break;
-	case TIPC_CMD_GET_MAX_SUBSCR:
-		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
-		break;
 	case TIPC_CMD_GET_NETID:
 		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
 		break;
@@ -393,6 +351,10 @@
 	case TIPC_CMD_GET_MAX_CLUSTERS:
 	case TIPC_CMD_SET_MAX_NODES:
 	case TIPC_CMD_GET_MAX_NODES:
+	case TIPC_CMD_SET_MAX_SUBSCR:
+	case TIPC_CMD_GET_MAX_SUBSCR:
+	case TIPC_CMD_SET_MAX_PUBL:
+	case TIPC_CMD_GET_MAX_PUBL:
 	case TIPC_CMD_SET_LOG_SIZE:
 	case TIPC_CMD_DUMP_LOG:
 		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 6586eac..bfe8af8 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -48,18 +48,13 @@
 
 
 /* global variables used by multiple sub-systems within TIPC */
-int tipc_random;
-
-const char tipc_alphabet[] =
-	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
+int tipc_random __read_mostly;
 
 /* configurable TIPC parameters */
-u32 tipc_own_addr;
-int tipc_max_ports;
-int tipc_max_subscriptions;
-int tipc_max_publications;
-int tipc_net_id;
-int tipc_remote_management;
+u32 tipc_own_addr __read_mostly;
+int tipc_max_ports __read_mostly;
+int tipc_net_id __read_mostly;
+int tipc_remote_management __read_mostly;
 
 
 /**
@@ -101,9 +96,8 @@
 {
 	int res;
 
-	res = tipc_net_start(addr);
-	if (!res)
-		res = tipc_eth_media_start();
+	tipc_net_start(addr);
+	res = tipc_eth_media_start();
 	if (res)
 		tipc_core_stop_net();
 	return res;
@@ -160,8 +154,6 @@
 
 	tipc_own_addr = 0;
 	tipc_remote_management = 1;
-	tipc_max_publications = 10000;
-	tipc_max_subscriptions = 2000;
 	tipc_max_ports = CONFIG_TIPC_PORTS;
 	tipc_net_id = 4711;
 
diff --git a/net/tipc/core.h b/net/tipc/core.h
index fd42e10..0207db0 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -60,7 +60,9 @@
 
 #define TIPC_MOD_VER "2.0.0"
 
-#define ULTRA_STRING_MAX_LEN 32768
+#define ULTRA_STRING_MAX_LEN	32768
+#define TIPC_MAX_SUBSCRIPTIONS	65535
+#define TIPC_MAX_PUBLICATIONS	65535
 
 struct tipc_msg;	/* msg.h */
 
@@ -74,19 +76,15 @@
 /*
  * Global configuration variables
  */
-extern u32 tipc_own_addr;
-extern int tipc_max_ports;
-extern int tipc_max_subscriptions;
-extern int tipc_max_publications;
-extern int tipc_net_id;
-extern int tipc_remote_management;
+extern u32 tipc_own_addr __read_mostly;
+extern int tipc_max_ports __read_mostly;
+extern int tipc_net_id __read_mostly;
+extern int tipc_remote_management __read_mostly;
 
 /*
  * Other global variables
  */
-extern int tipc_random;
-extern const char tipc_alphabet[];
-
+extern int tipc_random __read_mostly;
 
 /*
  * Routines available to privileged subsystems
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 90ac9bf..2132c1e 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -46,19 +46,30 @@
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
 struct eth_bearer {
 	struct tipc_bearer *bearer;
 	struct net_device *dev;
 	struct packet_type tipc_packet_type;
+	struct work_struct setup;
 	struct work_struct cleanup;
 };
 
 static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
-static struct notifier_block notifier;
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt,
+			      void *dv);
+/*
+ * Network device notifier info
+ */
+static struct notifier_block notifier = {
+	.notifier_call	= recv_notification,
+	.priority	= 0
+};
 
 /**
  * eth_media_addr_set - initialize Ethernet media address structure
@@ -134,6 +145,17 @@
 }
 
 /**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+	struct eth_bearer *eb_ptr =
+		container_of(work, struct eth_bearer, setup);
+
+	dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
+/**
  * enable_bearer - attach TIPC bearer to an Ethernet interface
  */
 static int enable_bearer(struct tipc_bearer *tb_ptr)
@@ -173,7 +195,8 @@
 	eb_ptr->tipc_packet_type.func = recv_msg;
 	eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
 	INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-	dev_add_pack(&eb_ptr->tipc_packet_type);
+	INIT_WORK(&eb_ptr->setup, setup_bearer);
+	schedule_work(&eb_ptr->setup);
 
 	/* Associate TIPC bearer with Ethernet bearer */
 	eb_ptr->bearer = tb_ptr;
@@ -357,8 +380,6 @@
 	if (res)
 		return res;
 
-	notifier.notifier_call = &recv_notification;
-	notifier.priority = 0;
 	res = register_netdevice_notifier(&notifier);
 	if (!res)
 		eth_started = 1;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 7a52d39..111ff83 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -45,7 +45,7 @@
 static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled;
+static int handler_enabled __read_mostly;
 
 static void process_signal_queue(unsigned long dummy);
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1c1e615..a79c755 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -210,9 +210,7 @@
 	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
 	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
 	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
-	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
-	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
 		return 0;
 
 	/* return link name components, if necessary */
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 360c478..98975e8 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -41,7 +41,7 @@
 #include "subscr.h"
 #include "port.h"
 
-static int tipc_nametbl_size = 1024;		/* must be a power of 2 */
+#define TIPC_NAMETBL_SIZE 1024		/* must be a power of 2 */
 
 /**
  * struct name_info - name sequence publication info
@@ -114,7 +114,7 @@
 
 static int hash(int x)
 {
-	return x & (tipc_nametbl_size - 1);
+	return x & (TIPC_NAMETBL_SIZE - 1);
 }
 
 /**
@@ -667,9 +667,9 @@
 {
 	struct publication *publ;
 
-	if (table.local_publ_count >= tipc_max_publications) {
+	if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
 		pr_warn("Publication failed, local publication limit reached (%u)\n",
-			tipc_max_publications);
+			TIPC_MAX_PUBLICATIONS);
 		return NULL;
 	}
 
@@ -871,7 +871,7 @@
 		ret += nametbl_header(buf, len, depth);
 		lowbound = 0;
 		upbound = ~0;
-		for (i = 0; i < tipc_nametbl_size; i++) {
+		for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
 			seq_head = &table.types[i];
 			hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
 				ret += nameseq_list(seq, buf + ret, len - ret,
@@ -935,7 +935,7 @@
 
 int tipc_nametbl_init(void)
 {
-	table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
+	table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
 			      GFP_ATOMIC);
 	if (!table.types)
 		return -ENOMEM;
@@ -953,7 +953,7 @@
 
 	/* Verify name table is empty, then release it */
 	write_lock_bh(&tipc_nametbl_lock);
-	for (i = 0; i < tipc_nametbl_size; i++) {
+	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
 		if (hlist_empty(&table.types[i]))
 			continue;
 		pr_err("nametbl_stop(): orphaned hash chain detected\n");
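
The name_table.c hunk turns the table size into the compile-time constant TIPC_NAMETBL_SIZE, and the "must be a power of 2" comment matters because hash() masks with (size - 1). For a power-of-two size that mask keeps exactly the low bits and is equivalent to a modulo, without the division. A tiny standalone sketch:

#include <stdio.h>

#define TBL_SIZE 1024   /* power of two, like TIPC_NAMETBL_SIZE */

static int hash(int x)
{
	return x & (TBL_SIZE - 1);   /* same form as hash() in name_table.c */
}

int main(void)
{
	int x;

	for (x = 1020; x < 1030; x++)
		printf("hash(%d) = %d   (x %% %d = %d)\n",
		       x, hash(x), TBL_SIZE, x % TBL_SIZE);
	return 0;
}

For non-power-of-two sizes the mask and the modulo diverge, which is why the comment insists on the constraint.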
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 5b5cea2..7d305ec 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -171,7 +171,7 @@
 	tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
-int tipc_net_start(u32 addr)
+void tipc_net_start(u32 addr)
 {
 	char addr_string[16];
 
@@ -187,7 +187,6 @@
 	pr_info("Started in network mode\n");
 	pr_info("Own node address %s, network identity %u\n",
 		tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
-	return 0;
 }
 
 void tipc_net_stop(void)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 9eb4b9e..079daad 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -41,7 +41,7 @@
 
 void tipc_net_route_msg(struct sk_buff *buf);
 
-int tipc_net_start(u32 addr);
+void tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 5ed5965..0f7d0d0 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -304,9 +304,9 @@
 	}
 
 	/* Refuse subscription if global limit exceeded */
-	if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+	if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
 		pr_warn("Subscription rejected, limit reached (%u)\n",
-			tipc_max_subscriptions);
+			TIPC_MAX_SUBSCRIPTIONS);
 		subscr_terminate(subscriber);
 		return NULL;
 	}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e4768c1..c5ee4ff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1450,7 +1450,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
 	wait_for_unix_gc();
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, false);
 	if (err < 0)
 		return err;
 
@@ -1619,7 +1619,7 @@
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
 	wait_for_unix_gc();
-	err = scm_send(sock, msg, siocb->scm);
+	err = scm_send(sock, msg, siocb->scm, false);
 	if (err < 0)
 		return err;
 
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d355f67..2f876b9 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -105,7 +105,7 @@
 
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (!netif_running(wdev->netdev))
+	if (wdev->netdev && !netif_running(wdev->netdev))
 		return;
 
 	switch (wdev->iftype) {
@@ -143,6 +143,11 @@
 	case NL80211_IFTYPE_WDS:
 		/* these interface types don't really have a channel */
 		return;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		if (wdev->wiphy->features &
+				NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
+			*chanmode = CHAN_MODE_EXCLUSIVE;
+		return;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
 		WARN_ON(1);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 31b40cc..443d4d7 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -230,9 +230,24 @@
 	rtnl_lock();
 	mutex_lock(&rdev->devlist_mtx);
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
-		if (wdev->netdev)
+	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		if (wdev->netdev) {
 			dev_close(wdev->netdev);
+			continue;
+		}
+		/* otherwise, check iftype */
+		switch (wdev->iftype) {
+		case NL80211_IFTYPE_P2P_DEVICE:
+			if (!wdev->p2p_started)
+				break;
+			rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+			wdev->p2p_started = false;
+			rdev->opencount--;
+			break;
+		default:
+			break;
+		}
+	}
 
 	mutex_unlock(&rdev->devlist_mtx);
 	rtnl_unlock();
@@ -407,6 +422,11 @@
 			if (WARN_ON(wiphy->software_iftypes & types))
 				return -EINVAL;
 
+			/* Only a single P2P_DEVICE can be allowed */
+			if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+				    c->limits[j].max > 1))
+				return -EINVAL;
+
 			cnt += c->limits[j].max;
 			/*
 			 * Don't advertise an unsupported type
@@ -734,6 +754,35 @@
 	dev_put(wdev->netdev);
 }
 
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+	ASSERT_RTNL();
+
+	if (WARN_ON(wdev->netdev))
+		return;
+
+	mutex_lock(&rdev->devlist_mtx);
+	list_del_rcu(&wdev->list);
+	rdev->devlist_generation++;
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_P2P_DEVICE:
+		if (!wdev->p2p_started)
+			break;
+		rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+		wdev->p2p_started = false;
+		rdev->opencount--;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+	mutex_unlock(&rdev->devlist_mtx);
+}
+EXPORT_SYMBOL(cfg80211_unregister_wdev);
+
 static struct device_type wiphy_type = {
 	.name	= "wlan",
 };
@@ -952,6 +1001,11 @@
 		 */
 		synchronize_rcu();
 		INIT_LIST_HEAD(&wdev->list);
+		/*
+		 * Ensure that all events have been processed and
+		 * freed.
+		 */
+		cfg80211_process_wdev_events(wdev);
 		break;
 	case NETDEV_PRE_UP:
 		if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5206c68..bc7430b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@
 			  struct net_device *dev, enum nl80211_iftype ntype,
 			  u32 *flags, struct vif_params *params);
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 
 int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1cdb1d5..8fd0242 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -736,7 +736,6 @@
 			  const u8 *buf, size_t len, bool no_cck,
 			  bool dont_wait_for_ack, u64 *cookie)
 {
-	struct net_device *dev = wdev->netdev;
 	const struct ieee80211_mgmt *mgmt;
 	u16 stype;
 
@@ -796,7 +795,7 @@
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_P2P_GO:
 		case NL80211_IFTYPE_AP_VLAN:
-			if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
+			if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
 				err = -EINVAL;
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +808,11 @@
 			 * cfg80211 doesn't track the stations
 			 */
 			break;
+		case NL80211_IFTYPE_P2P_DEVICE:
+			/*
+			 * fall through; a P2P device only supports
+			 * public action frames
+			 */
 		default:
 			err = -EOPNOTSUPP;
 			break;
@@ -819,7 +823,7 @@
 			return err;
 	}
 
-	if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
+	if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
 		return -EINVAL;
 
 	/* Transmit the Action frame as requested by user space */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 97026f3..787aeaa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1100,6 +1100,7 @@
 		if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
 			goto nla_put_failure;
 	}
+	CMD(start_p2p_device, START_P2P_DEVICE);
 
 #ifdef CONFIG_NL80211_TESTMODE
 	CMD(testmode_cmd, TESTMODE);
@@ -1748,13 +1749,13 @@
 
 	if (dev &&
 	    (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
-	     nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
-	     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+	     nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
 		goto nla_put_failure;
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
 	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
 	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
 			rdev->devlist_generation ^
 			(cfg80211_rdev_list_generation << 2)))
@@ -2021,8 +2022,10 @@
 		return PTR_ERR(wdev);
 	}
 
-	if (type == NL80211_IFTYPE_MESH_POINT &&
-	    info->attrs[NL80211_ATTR_MESH_ID]) {
+	switch (type) {
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!info->attrs[NL80211_ATTR_MESH_ID])
+			break;
 		wdev_lock(wdev);
 		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
 			     IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,6 +2034,26 @@
 		memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
 		       wdev->mesh_id_up_len);
 		wdev_unlock(wdev);
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/*
+		 * P2P Device doesn't have a netdev, so doesn't go
+		 * through the netdev notifier and must be added here
+		 */
+		mutex_init(&wdev->mtx);
+		INIT_LIST_HEAD(&wdev->event_list);
+		spin_lock_init(&wdev->event_lock);
+		INIT_LIST_HEAD(&wdev->mgmt_registrations);
+		spin_lock_init(&wdev->mgmt_registrations_lock);
+
+		mutex_lock(&rdev->devlist_mtx);
+		wdev->identifier = ++rdev->wdev_id;
+		list_add_rcu(&wdev->list, &rdev->wdev_list);
+		rdev->devlist_generation++;
+		mutex_unlock(&rdev->devlist_mtx);
+		break;
+	default:
+		break;
 	}
 
 	if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
@@ -6053,6 +6076,7 @@
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -6099,6 +6123,7 @@
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -6195,6 +6220,7 @@
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -6810,6 +6836,68 @@
 	return 0;
 }
 
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+	int err;
+
+	if (!rdev->ops->start_p2p_device)
+		return -EOPNOTSUPP;
+
+	if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
+	if (wdev->p2p_started)
+		return 0;
+
+	mutex_lock(&rdev->devlist_mtx);
+	err = cfg80211_can_add_interface(rdev, wdev->iftype);
+	mutex_unlock(&rdev->devlist_mtx);
+	if (err)
+		return err;
+
+	err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+	if (err)
+		return err;
+
+	wdev->p2p_started = true;
+	mutex_lock(&rdev->devlist_mtx);
+	rdev->opencount++;
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+
+	if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
+	if (!rdev->ops->stop_p2p_device)
+		return -EOPNOTSUPP;
+
+	if (!wdev->p2p_started)
+		return 0;
+
+	rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+	wdev->p2p_started = false;
+
+	mutex_lock(&rdev->devlist_mtx);
+	rdev->opencount--;
+	mutex_unlock(&rdev->devlist_mtx);
+
+	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+		rdev->scan_req->aborted = true;
+		___cfg80211_scan_done(rdev, true);
+	}
+
+	return 0;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -6817,7 +6905,7 @@
 #define NL80211_FLAG_NEED_NETDEV_UP	(NL80211_FLAG_NEED_NETDEV |\
 					 NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_NEED_WDEV		0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP; a P2P device must be started */
 #define NL80211_FLAG_NEED_WDEV_UP	(NL80211_FLAG_NEED_WDEV |\
 					 NL80211_FLAG_CHECK_NETDEV_UP)
 
@@ -6878,6 +6966,13 @@
 			}
 
 			dev_hold(dev);
+		} else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+			if (!wdev->p2p_started) {
+				mutex_unlock(&cfg80211_mutex);
+				if (rtnl)
+					rtnl_unlock();
+				return -ENETDOWN;
+			}
 		}
 
 		cfg80211_lock_rdev(rdev);
@@ -7439,7 +7534,22 @@
 		.internal_flags = NL80211_FLAG_NEED_NETDEV |
 				  NL80211_FLAG_NEED_RTNL,
 	},
-
+	{
+		.cmd = NL80211_CMD_START_P2P_DEVICE,
+		.doit = nl80211_start_p2p_device,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_STOP_P2P_DEVICE,
+		.doit = nl80211_stop_p2p_device,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index c4ad795..7d604c0 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -41,6 +41,8 @@
 	[IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
 	[IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
 	[IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+	[IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+	[IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
 	/*
 	 * add more here as they are defined in radiotap.h
 	 */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee7..2ded3c7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@
 		channel_flags |= IEEE80211_CHAN_NO_IBSS;
 	if (rd_flags & NL80211_RRF_DFS)
 		channel_flags |= IEEE80211_CHAN_RADAR;
+	if (rd_flags & NL80211_RRF_NO_OFDM)
+		channel_flags |= IEEE80211_CHAN_NO_OFDM;
 	return channel_flags;
 }
 
@@ -901,7 +903,21 @@
 	chan->max_antenna_gain = min(chan->orig_mag,
 		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
 	chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
-	chan->max_power = min(chan->max_power, chan->max_reg_power);
+	if (chan->orig_mpwr) {
+		/*
+		 * Devices that have their own custom regulatory domain
+		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+		 * passed country IE power settings.
+		 */
+		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+			chan->max_power = chan->max_reg_power;
+		else
+			chan->max_power = min(chan->orig_mpwr,
+					      chan->max_reg_power);
+	} else
+		chan->max_power = chan->max_reg_power;
 }
 
 static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@
 			chan->flags = chan->orig_flags;
 			chan->max_antenna_gain = chan->orig_mag;
 			chan->max_power = chan->orig_mpwr;
+			chan->beacon_found = false;
 		}
 	}
 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 26f8cd3..ef35f4e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -684,22 +684,10 @@
 
 const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
 {
-	u8 *end, *pos;
-
-	pos = bss->information_elements;
-	if (pos == NULL)
+	if (bss->information_elements == NULL)
 		return NULL;
-	end = pos + bss->len_information_elements;
-
-	while (pos + 1 < end) {
-		if (pos + 2 + pos[1] > end)
-			break;
-		if (pos[0] == ie)
-			return pos;
-		pos += 2 + pos[1];
-	}
-
-	return NULL;
+	return cfg80211_find_ie(ie, bss->information_elements,
+				 bss->len_information_elements);
 }
 EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
@@ -735,7 +723,7 @@
 	wdev->connect_keys = NULL;
 }
 
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
 {
 	struct cfg80211_event *ev;
 	unsigned long flags;
@@ -812,6 +800,10 @@
 	if (otype == NL80211_IFTYPE_AP_VLAN)
 		return -EOPNOTSUPP;
 
+	/* cannot change into P2P device type */
+	if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
 	if (!rdev->ops->change_virtual_intf ||
 	    !(rdev->wiphy.interface_modes & (1 << ntype)))
 		return -EOPNOTSUPP;
@@ -889,6 +881,9 @@
 		case NUM_NL80211_IFTYPES:
 			/* not happening */
 			break;
+		case NL80211_IFTYPE_P2P_DEVICE:
+			WARN_ON(1);
+			break;
 		}
 	}
 
@@ -1053,8 +1048,15 @@
 	list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
 		if (wdev_iter == wdev)
 			continue;
-		if (!netif_running(wdev_iter->netdev))
-			continue;
+		if (wdev_iter->netdev) {
+			if (!netif_running(wdev_iter->netdev))
+				continue;
+		} else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+			if (!wdev_iter->p2p_started)
+				continue;
+		} else {
+			WARN_ON(1);
+		}
 
 		if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
 			continue;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c5a5165..741a32a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -42,13 +42,12 @@
 static struct dst_entry *xfrm_policy_sk_bundles;
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+						__read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -95,6 +94,24 @@
 	return false;
 }
 
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+{
+	struct xfrm_policy_afinfo *afinfo;
+
+	if (unlikely(family >= NPROTO))
+		return NULL;
+	rcu_read_lock();
+	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
+	if (unlikely(!afinfo))
+		rcu_read_unlock();
+	return afinfo;
+}
+
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+{
+	rcu_read_unlock();
+}
+
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
 						  const xfrm_address_t *saddr,
 						  const xfrm_address_t *daddr,
@@ -1357,6 +1374,8 @@
 
 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
 		xdst->flo.ops = &xfrm_bundle_fc_ops;
+		if (afinfo->init_dst)
+			afinfo->init_dst(net, xdst);
 	} else
 		xdst = ERR_PTR(-ENOBUFS);
 
@@ -2418,7 +2437,7 @@
 		return -EINVAL;
 	if (unlikely(afinfo->family >= NPROTO))
 		return -EAFNOSUPPORT;
-	write_lock_bh(&xfrm_policy_afinfo_lock);
+	spin_lock(&xfrm_policy_afinfo_lock);
 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
 		err = -ENOBUFS;
 	else {
@@ -2439,9 +2458,9 @@
 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
 		if (likely(afinfo->garbage_collect == NULL))
 			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
-		xfrm_policy_afinfo[afinfo->family] = afinfo;
+		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
 	}
-	write_unlock_bh(&xfrm_policy_afinfo_lock);
+	spin_unlock(&xfrm_policy_afinfo_lock);
 
 	rtnl_lock();
 	for_each_net(net) {
@@ -2474,21 +2493,26 @@
 		return -EINVAL;
 	if (unlikely(afinfo->family >= NPROTO))
 		return -EAFNOSUPPORT;
-	write_lock_bh(&xfrm_policy_afinfo_lock);
+	spin_lock(&xfrm_policy_afinfo_lock);
 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
 			err = -EINVAL;
-		else {
-			struct dst_ops *dst_ops = afinfo->dst_ops;
-			xfrm_policy_afinfo[afinfo->family] = NULL;
-			dst_ops->kmem_cachep = NULL;
-			dst_ops->check = NULL;
-			dst_ops->negative_advice = NULL;
-			dst_ops->link_failure = NULL;
-			afinfo->garbage_collect = NULL;
-		}
+		else
+			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
+					 NULL);
 	}
-	write_unlock_bh(&xfrm_policy_afinfo_lock);
+	spin_unlock(&xfrm_policy_afinfo_lock);
+	if (!err) {
+		struct dst_ops *dst_ops = afinfo->dst_ops;
+
+		synchronize_rcu();
+
+		dst_ops->kmem_cachep = NULL;
+		dst_ops->check = NULL;
+		dst_ops->negative_advice = NULL;
+		dst_ops->link_failure = NULL;
+		afinfo->garbage_collect = NULL;
+	}
 	return err;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
@@ -2497,33 +2521,16 @@
 {
 	struct xfrm_policy_afinfo *afinfo;
 
-	read_lock_bh(&xfrm_policy_afinfo_lock);
-	afinfo = xfrm_policy_afinfo[AF_INET];
+	rcu_read_lock();
+	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
 	if (afinfo)
 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
 #if IS_ENABLED(CONFIG_IPV6)
-	afinfo = xfrm_policy_afinfo[AF_INET6];
+	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
 	if (afinfo)
 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
 #endif
-	read_unlock_bh(&xfrm_policy_afinfo_lock);
-}
-
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
-{
-	struct xfrm_policy_afinfo *afinfo;
-	if (unlikely(family >= NPROTO))
-		return NULL;
-	read_lock(&xfrm_policy_afinfo_lock);
-	afinfo = xfrm_policy_afinfo[family];
-	if (unlikely(!afinfo))
-		read_unlock(&xfrm_policy_afinfo_lock);
-	return afinfo;
-}
-
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
-	read_unlock(&xfrm_policy_afinfo_lock);
+	rcu_read_unlock();
 }
 
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f9..7856c33 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@
 	if (x->lft.hard_add_expires_seconds) {
 		long tmo = x->lft.hard_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
-			goto expired;
+		if (tmo <= 0) {
+			if (x->xflags & XFRM_SOFT_EXPIRE) {
+				/* enter hard expire without soft expire first?!
+				 * setting a new date could trigger this.
+				 * workarbound: fix x->curflt.add_time by below:
+				 * workaround: fix x->curlft.add_time as below:
+				x->curlft.add_time = now - x->saved_tmo - 1;
+				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
+			} else
+				goto expired;
+		}
 		if (tmo < next)
 			next = tmo;
 	}
@@ -433,10 +442,14 @@
 	if (x->lft.soft_add_expires_seconds) {
 		long tmo = x->lft.soft_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
+		if (tmo <= 0) {
 			warn = 1;
-		else if (tmo < next)
+			x->xflags &= ~XFRM_SOFT_EXPIRE;
+		} else if (tmo < next) {
 			next = tmo;
+			x->xflags |= XFRM_SOFT_EXPIRE;
+			x->saved_tmo = tmo;
+		}
 	}
 	if (x->lft.soft_use_expires_seconds) {
 		long tmo = x->lft.soft_use_expires_seconds +
@@ -1687,7 +1700,7 @@
 
 	read_lock(&xfrm_km_lock);
 	list_for_each_entry(km, &xfrm_km_list, list) {
-		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
+		acqret = km->acquire(x, t, pol);
 		if (!acqret)
 			err = acqret;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e75d8e4..ab58034 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2567,8 +2567,7 @@
 }
 
 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
-			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
-			 int dir)
+			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
 {
 	__u32 seq = xfrm_get_acqseq();
 	struct xfrm_user_acquire *ua;
@@ -2583,7 +2582,7 @@
 	memcpy(&ua->id, &x->id, sizeof(ua->id));
 	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
 	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
-	copy_to_user_policy(xp, &ua->policy, dir);
+	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
 	ua->aalgos = xt->aalgos;
 	ua->ealgos = xt->ealgos;
 	ua->calgos = xt->calgos;
@@ -2605,7 +2604,7 @@
 }
 
 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
-			     struct xfrm_policy *xp, int dir)
+			     struct xfrm_policy *xp)
 {
 	struct net *net = xs_net(x);
 	struct sk_buff *skb;
@@ -2614,7 +2613,7 @@
 	if (skb == NULL)
 		return -ENOMEM;
 
-	if (build_acquire(skb, x, xt, xp, dir) < 0)
+	if (build_acquire(skb, x, xt, xp) < 0)
 		BUG();
 
 	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 913d6bd..ca05ba2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3016,7 +3016,8 @@
 					$herectx .= raw_line($linenr, $n) . "\n";
 				}
 
-				if (($stmts =~ tr/;/;/) == 1) {
+				if (($stmts =~ tr/;/;/) == 1 &&
+				    $stmts !~ /^\s*(if|while|for|switch)\b/) {
 					WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
 					     "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
 				}
diff --git a/scripts/decodecode b/scripts/decodecode
index 18ba881..4f8248d 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -89,7 +89,7 @@
 disas $T
 cat $T.dis >> $T.aa
 
-faultline=`cat $T.dis | head -1 | cut -d":" -f2`
+faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
 faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
 
 cat $T.oo | sed -e "s/\($faultline\)/\*\1     <-- trapping instruction/g"
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9b0c0b8..8fd107a 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1786,6 +1786,7 @@
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
     $prototype =~ s/__must_check +//;
+    $prototype =~ s/__weak +//;
     $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 83554ee..0cc99a3 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -279,12 +279,46 @@
 	}
 
 	if (rc) {
-		char name[sizeof(current->comm)];
 		printk_ratelimited(KERN_NOTICE
 			"ptrace of pid %d was attempted by: %s (pid %d)\n",
-			child->pid,
-			get_task_comm(name, current),
-			current->pid);
+			child->pid, current->comm, current->pid);
+	}
+
+	return rc;
+}
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_traceme(struct task_struct *parent)
+{
+	int rc;
+
+	/* If standard caps disallows it, so does Yama.  We should
+	 * only tighten restrictions further.
+	 */
+	rc = cap_ptrace_traceme(parent);
+	if (rc)
+		return rc;
+
+	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
+	switch (ptrace_scope) {
+	case YAMA_SCOPE_CAPABILITY:
+		if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
+			rc = -EPERM;
+		break;
+	case YAMA_SCOPE_NO_ATTACH:
+		rc = -EPERM;
+		break;
+	}
+
+	if (rc) {
+		printk_ratelimited(KERN_NOTICE
+			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
+			current->pid, parent->comm, parent->pid);
 	}
 
 	return rc;
@@ -294,6 +328,7 @@
 	.name =			"yama",
 
 	.ptrace_access_check =	yama_ptrace_access_check,
+	.ptrace_traceme =	yama_ptrace_traceme,
 	.task_prctl =		yama_task_prctl,
 	.task_free =		yama_task_free,
 };
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 0d7b25e..4e1fda7 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -106,7 +106,7 @@
 	.prepare		= pxa2xx_ac97_pcm_prepare,
 };
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
 static int pxa2xx_ac97_do_suspend(struct snd_card *card)
 {
@@ -243,7 +243,7 @@
 	.driver		= {
 		.name	= "pxa2xx-ac97",
 		.owner	= THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 		.pm	= &pxa2xx_ac97_pm_ops,
 #endif
 	},
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index eb4ceb7..277ebce 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -452,6 +452,7 @@
 	dac->regs = ioremap(regs->start, resource_size(regs));
 	if (!dac->regs) {
 		dev_dbg(&pdev->dev, "could not remap register memory\n");
+		retval = -ENOMEM;
 		goto out_free_card;
 	}
 
@@ -534,7 +535,7 @@
 	return retval;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_abdac_suspend(struct device *pdev)
 {
 	struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index bf47025..9052aff 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -278,14 +278,9 @@
 	if (retval < 0)
 		return retval;
 	/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
-	if (cpu_is_at32ap7000()) {
-		if (retval < 0)
-			return retval;
-		/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
-		if (retval == 1)
-			if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
-				dw_dma_cyclic_free(chip->dma.rx_chan);
-	}
+	if (cpu_is_at32ap7000() && retval == 1)
+		if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
+			dw_dma_cyclic_free(chip->dma.rx_chan);
 
 	/* Set restrictions to params. */
 	mutex_lock(&opened_mutex);
@@ -980,6 +975,7 @@
 
 	if (!chip->regs) {
 		dev_dbg(&pdev->dev, "could not remap register memory\n");
+		retval = -ENOMEM;
 		goto err_ioremap;
 	}
 
@@ -1134,7 +1130,7 @@
 	return retval;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_ac97c_suspend(struct device *pdev)
 {
 	struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 4e7ec2b..d0f0035 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -101,7 +101,7 @@
 		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
 						 chunk, &tmpb) < 0) {
 			if (!sgbuf->pages)
-				return NULL;
+				goto _failed;
 			if (!res_size)
 				goto _failed;
 			size = sgbuf->pages * PAGE_SIZE;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 1128b35..5a34355 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1176,7 +1176,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int loopback_suspend(struct device *pdev)
 {
 	struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index f7d3bfc..54bb664 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1064,7 +1064,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int snd_dummy_suspend(struct device *pdev)
 {
 	struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 6ca59fc..ef17129 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -199,7 +199,7 @@
 	pcspkr_stop_sound();
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int pcsp_suspend(struct device *dev)
 {
 	struct snd_pcsp *chip = dev_get_drvdata(dev);
@@ -212,7 +212,7 @@
 #define PCSP_PM_OPS	&pcsp_pm
 #else
 #define PCSP_PM_OPS	NULL
-#endif	/* CONFIG_PM */
+#endif	/* CONFIG_PM_SLEEP */
 
 static void pcsp_shutdown(struct platform_device *dev)
 {
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 2d67c78..f7cdaf5 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -233,7 +233,7 @@
 			irq[dev], dma8[dev], dma16[dev]);
 	}
 
-	if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) {
+	if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) {
 		snd_card_free(card);
 		return error;
 	}
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
index 733b014..b2b3c01 100644
--- a/sound/oss/sb_audio.c
+++ b/sound/oss/sb_audio.c
@@ -575,13 +575,15 @@
 	if (speed > 0)
 	{
 		int tmp;
-		int s = speed * devc->channels;
+		int s;
 
 		if (speed < 5000)
 			speed = 5000;
 		if (speed > 44100)
 			speed = 44100;
 
+		s = speed * devc->channels;
+
 		devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
 
 		tmp = 256 - devc->tconst;
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index f75f5ff..a71d1c1 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -94,7 +94,7 @@
 
 	if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX &&
 		       codec_index != CS46XX_SECONDARY_CODEC_INDEX))
-		return -EINVAL;
+		return 0xffff;
 
 	chip->active_ctrl(chip, 1);
 
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 8e40262..2f6e9c7 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1725,8 +1725,10 @@
 	atc_connect_resources(atc);
 
 	atc->timer = ct_timer_new(atc);
-	if (!atc->timer)
+	if (!atc->timer) {
+		err = -ENOMEM;
 		goto error1;
+	}
 
 	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops);
 	if (err < 0)
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f502a2..0a43662 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -326,7 +326,10 @@
 	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
 		unsigned long ofs = idx << PAGE_SHIFT;
 		dma_addr_t addr;
-		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
+		if (ofs >= runtime->dma_bytes)
+			addr = emu->silent_page.addr;
+		else
+			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
 		if (! is_valid_page(emu, addr)) {
 			printk(KERN_ERR "emu: failure page = %d\n", idx);
 			mutex_unlock(&hdr->block_mutex);
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 647218d..4f7d2df 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -332,13 +332,12 @@
 	if (cfg->dig_outs)
 		snd_printd("   dig-out=0x%x/0x%x\n",
 			   cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
-	snd_printd("   inputs:");
+	snd_printd("   inputs:\n");
 	for (i = 0; i < cfg->num_inputs; i++) {
-		snd_printd(" %s=0x%x",
+		snd_printd("     %s=0x%x\n",
 			    hda_get_autocfg_input_label(codec, cfg, i),
 			    cfg->inputs[i].pin);
 	}
-	snd_printd("\n");
 	if (cfg->dig_in_pin)
 		snd_printd("   dig-in=0x%x\n", cfg->dig_in_pin);
 
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 0bc2315..0849aac 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -231,16 +231,22 @@
 }
 EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device);
 
+static bool ctl_has_mute(struct snd_kcontrol *kcontrol)
+{
+	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+	return query_amp_caps(codec, get_amp_nid(kcontrol),
+			      get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE;
+}
+
 /* get/put callbacks for beep mute mixer switches */
 int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
 {
 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct hda_beep *beep = codec->beep;
-	if (beep) {
+	if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
 		ucontrol->value.integer.value[0] =
-			ucontrol->value.integer.value[1] =
-			beep->enabled;
+			ucontrol->value.integer.value[1] = beep->enabled;
 		return 0;
 	}
 	return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
@@ -252,9 +258,20 @@
 {
 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct hda_beep *beep = codec->beep;
-	if (beep)
-		snd_hda_enable_beep_device(codec,
-					   *ucontrol->value.integer.value);
+	if (beep) {
+		u8 chs = get_amp_channels(kcontrol);
+		int enable = 0;
+		long *valp = ucontrol->value.integer.value;
+		if (chs & 1) {
+			enable |= *valp;
+			valp++;
+		}
+		if (chs & 2)
+			enable |= *valp;
+		snd_hda_enable_beep_device(codec, enable);
+	}
+	if (!ctl_has_mute(kcontrol))
+		return 0;
 	return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 88a9c20..f560051 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1386,6 +1386,44 @@
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
 
+/* update the stream-id if changed */
+static void update_pcm_stream_id(struct hda_codec *codec,
+				 struct hda_cvt_setup *p, hda_nid_t nid,
+				 u32 stream_tag, int channel_id)
+{
+	unsigned int oldval, newval;
+
+	if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
+		oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+		newval = (stream_tag << 4) | channel_id;
+		if (oldval != newval)
+			snd_hda_codec_write(codec, nid, 0,
+					    AC_VERB_SET_CHANNEL_STREAMID,
+					    newval);
+		p->stream_tag = stream_tag;
+		p->channel_id = channel_id;
+	}
+}
+
+/* update the format-id if changed */
+static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p,
+			      hda_nid_t nid, int format)
+{
+	unsigned int oldval;
+
+	if (p->format_id != format) {
+		oldval = snd_hda_codec_read(codec, nid, 0,
+					    AC_VERB_GET_STREAM_FORMAT, 0);
+		if (oldval != format) {
+			msleep(1);
+			snd_hda_codec_write(codec, nid, 0,
+					    AC_VERB_SET_STREAM_FORMAT,
+					    format);
+		}
+		p->format_id = format;
+	}
+}
+
 /**
  * snd_hda_codec_setup_stream - set up the codec for streaming
  * @codec: the CODEC to set up
@@ -1400,7 +1438,6 @@
 {
 	struct hda_codec *c;
 	struct hda_cvt_setup *p;
-	unsigned int oldval, newval;
 	int type;
 	int i;
 
@@ -1413,29 +1450,13 @@
 	p = get_hda_cvt_setup(codec, nid);
 	if (!p)
 		return;
-	/* update the stream-id if changed */
-	if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
-		oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
-		newval = (stream_tag << 4) | channel_id;
-		if (oldval != newval)
-			snd_hda_codec_write(codec, nid, 0,
-					    AC_VERB_SET_CHANNEL_STREAMID,
-					    newval);
-		p->stream_tag = stream_tag;
-		p->channel_id = channel_id;
-	}
-	/* update the format-id if changed */
-	if (p->format_id != format) {
-		oldval = snd_hda_codec_read(codec, nid, 0,
-					    AC_VERB_GET_STREAM_FORMAT, 0);
-		if (oldval != format) {
-			msleep(1);
-			snd_hda_codec_write(codec, nid, 0,
-					    AC_VERB_SET_STREAM_FORMAT,
-					    format);
-		}
-		p->format_id = format;
-	}
+
+	if (codec->pcm_format_first)
+		update_pcm_format(codec, p, nid, format);
+	update_pcm_stream_id(codec, p, nid, stream_tag, channel_id);
+	if (!codec->pcm_format_first)
+		update_pcm_format(codec, p, nid, format);
+
 	p->active = 1;
 	p->dirty = 0;
 
@@ -3497,7 +3518,7 @@
 {
 	int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE);
 
-	if (sup < 0)
+	if (sup == -1)
 		return false;
 	if (sup & power_state)
 		return true;
@@ -4433,6 +4454,8 @@
 	 * then there is no need to go through power up here.
 	 */
 	if (codec->power_on) {
+		if (codec->power_transition < 0)
+			codec->power_transition = 0;
 		spin_unlock(&codec->power_lock);
 		return;
 	}
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index c422d33..7fbc1bc 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -861,6 +861,7 @@
 	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
 	unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
 	unsigned int no_jack_detect:1;	/* Machine has no jack-detection */
+	unsigned int pcm_format_first:1; /* PCM format must be set first */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	unsigned int power_on :1;	/* current (global) power-state */
 	int power_transition;	/* power-state in transition */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c8aced1..60882c6 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -151,6 +151,7 @@
 			 "{Intel, CPT},"
 			 "{Intel, PPT},"
 			 "{Intel, LPT},"
+			 "{Intel, LPT_LP},"
 			 "{Intel, HPT},"
 			 "{Intel, PBG},"
 			 "{Intel, SCH},"
@@ -3270,6 +3271,14 @@
 	{ PCI_DEVICE(0x8086, 0x8c20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
 	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
+	/* Lynx Point-LP */
+	{ PCI_DEVICE(0x8086, 0x9c20),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
+	/* Lynx Point-LP */
+	{ PCI_DEVICE(0x8086, 0x9c21),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
 	/* Haswell */
 	{ PCI_DEVICE(0x8086, 0x0c0c),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 7e46258..6894ec6 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -412,7 +412,7 @@
 	if (digi1 & AC_DIG1_EMPHASIS)
 		snd_iprintf(buffer, " Preemphasis");
 	if (digi1 & AC_DIG1_COPYRIGHT)
-		snd_iprintf(buffer, " Copyright");
+		snd_iprintf(buffer, " Non-Copyright");
 	if (digi1 & AC_DIG1_NONAUDIO)
 		snd_iprintf(buffer, " Non-Audio");
 	if (digi1 & AC_DIG1_PROFESSIONAL)
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index d0d3540..49750a9 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -246,7 +246,7 @@
 					    AC_VERB_SET_AMP_GAIN_MUTE,
 					    AMP_OUT_UNMUTE);
 	}
-	if (dac)
+	if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP))
 		snd_hda_codec_write(codec, dac, 0,
 				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);
 }
@@ -261,7 +261,7 @@
 					    AC_VERB_SET_AMP_GAIN_MUTE,
 					    AMP_IN_UNMUTE(0));
 	}
-	if (adc)
+	if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP))
 		snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE,
 				    AMP_IN_UNMUTE(0));
 }
@@ -275,6 +275,10 @@
 	int type = dir ? HDA_INPUT : HDA_OUTPUT;
 	struct snd_kcontrol_new knew =
 		HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
+	if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
+		snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
+		return 0;
+	}
 	sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
 	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
 }
@@ -286,6 +290,10 @@
 	int type = dir ? HDA_INPUT : HDA_OUTPUT;
 	struct snd_kcontrol_new knew =
 		HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
+	if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
+		snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
+		return 0;
+	}
 	sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
 	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
 }
@@ -464,50 +472,17 @@
 }
 
 /*
- * PCM stuffs
- */
-static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
-				 u32 stream_tag,
-				 int channel_id, int format)
-{
-	unsigned int oldval, newval;
-
-	if (!nid)
-		return;
-
-	snd_printdd("ca0132_setup_stream: "
-		"NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
-		nid, stream_tag, channel_id, format);
-
-	/* update the format-id if changed */
-	oldval = snd_hda_codec_read(codec, nid, 0,
-				    AC_VERB_GET_STREAM_FORMAT,
-				    0);
-	if (oldval != format) {
-		msleep(20);
-		snd_hda_codec_write(codec, nid, 0,
-				    AC_VERB_SET_STREAM_FORMAT,
-				    format);
-	}
-
-	oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
-	newval = (stream_tag << 4) | channel_id;
-	if (oldval != newval) {
-		snd_hda_codec_write(codec, nid, 0,
-				    AC_VERB_SET_CHANNEL_STREAMID,
-				    newval);
-	}
-}
-
-static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
-{
-	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
-	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
-}
-
-/*
  * PCM callbacks
  */
+static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo,
+				    struct hda_codec *codec,
+				    struct snd_pcm_substream *substream)
+{
+	struct ca0132_spec *spec = codec->spec;
+	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
+					     hinfo);
+}
+
 static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 			struct hda_codec *codec,
 			unsigned int stream_tag,
@@ -515,10 +490,8 @@
 			struct snd_pcm_substream *substream)
 {
 	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
-
-	return 0;
+	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
+						stream_tag, format, substream);
 }
 
 static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -526,15 +499,20 @@
 			struct snd_pcm_substream *substream)
 {
 	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_cleanup_stream(codec, spec->dacs[0]);
-
-	return 0;
+	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
 }
 
 /*
  * Digital out
  */
+static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
+					struct hda_codec *codec,
+					struct snd_pcm_substream *substream)
+{
+	struct ca0132_spec *spec = codec->spec;
+	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
+}
+
 static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 			struct hda_codec *codec,
 			unsigned int stream_tag,
@@ -542,10 +520,8 @@
 			struct snd_pcm_substream *substream)
 {
 	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format);
-
-	return 0;
+	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
+					     stream_tag, format, substream);
 }
 
 static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -553,65 +529,15 @@
 			struct snd_pcm_substream *substream)
 {
 	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_cleanup_stream(codec, spec->dig_out);
-
-	return 0;
+	return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
 }
 
-/*
- * Analog capture
- */
-static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
-			struct hda_codec *codec,
-			unsigned int stream_tag,
-			unsigned int format,
-			struct snd_pcm_substream *substream)
+static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
+					 struct hda_codec *codec,
+					 struct snd_pcm_substream *substream)
 {
 	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_setup_stream(codec, spec->adcs[substream->number],
-			     stream_tag, 0, format);
-
-	return 0;
-}
-
-static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
-			struct hda_codec *codec,
-			struct snd_pcm_substream *substream)
-{
-	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_cleanup_stream(codec, spec->adcs[substream->number]);
-
-	return 0;
-}
-
-/*
- * Digital capture
- */
-static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
-			struct hda_codec *codec,
-			unsigned int stream_tag,
-			unsigned int format,
-			struct snd_pcm_substream *substream)
-{
-	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format);
-
-	return 0;
-}
-
-static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
-			struct hda_codec *codec,
-			struct snd_pcm_substream *substream)
-{
-	struct ca0132_spec *spec = codec->spec;
-
-	ca0132_cleanup_stream(codec, spec->dig_in);
-
-	return 0;
+	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
 }
 
 /*
@@ -621,6 +547,7 @@
 	.channels_min = 2,
 	.channels_max = 2,
 	.ops = {
+		.open = ca0132_playback_pcm_open,
 		.prepare = ca0132_playback_pcm_prepare,
 		.cleanup = ca0132_playback_pcm_cleanup
 	},
@@ -630,10 +557,6 @@
 	.substreams = 1,
 	.channels_min = 2,
 	.channels_max = 2,
-	.ops = {
-		.prepare = ca0132_capture_pcm_prepare,
-		.cleanup = ca0132_capture_pcm_cleanup
-	},
 };
 
 static struct hda_pcm_stream ca0132_pcm_digital_playback = {
@@ -641,6 +564,8 @@
 	.channels_min = 2,
 	.channels_max = 2,
 	.ops = {
+		.open = ca0132_dig_playback_pcm_open,
+		.close = ca0132_dig_playback_pcm_close,
 		.prepare = ca0132_dig_playback_pcm_prepare,
 		.cleanup = ca0132_dig_playback_pcm_cleanup
 	},
@@ -650,10 +575,6 @@
 	.substreams = 1,
 	.channels_min = 2,
 	.channels_max = 2,
-	.ops = {
-		.prepare = ca0132_dig_capture_pcm_prepare,
-		.cleanup = ca0132_dig_capture_pcm_cleanup
-	},
 };
 
 static int ca0132_build_pcms(struct hda_codec *codec)
@@ -928,18 +849,16 @@
 						    spec->dig_out);
 		if (err < 0)
 			return err;
-		err = add_out_volume(codec, spec->dig_out, "IEC958");
+		err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
 		if (err < 0)
 			return err;
+		/* spec->multiout.share_spdif = 1; */
 	}
 
 	if (spec->dig_in) {
 		err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
 		if (err < 0)
 			return err;
-		err = add_in_volume(codec, spec->dig_in, "IEC958");
-		if (err < 0)
-			return err;
 	}
 	return 0;
 }
@@ -961,6 +880,9 @@
 	struct ca0132_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
 
+	codec->pcm_format_first = 1;
+	codec->no_sticky_stream = 1;
+
 	/* line-outs */
 	cfg->line_outs = 1;
 	cfg->line_out_pins[0] = 0x0b; /* front */
@@ -988,14 +910,24 @@
 
 	/* Mic-in */
 	spec->input_pins[0] = 0x12;
-	spec->input_labels[0] = "Mic-In";
+	spec->input_labels[0] = "Mic";
 	spec->adcs[0] = 0x07;
 
 	/* Line-In */
 	spec->input_pins[1] = 0x11;
-	spec->input_labels[1] = "Line-In";
+	spec->input_labels[1] = "Line";
 	spec->adcs[1] = 0x08;
 	spec->num_inputs = 2;
+
+	/* SPDIF I/O */
+	spec->dig_out = 0x05;
+	spec->multiout.dig_out_nid = spec->dig_out;
+	cfg->dig_out_pins[0] = 0x0c;
+	cfg->dig_outs = 1;
+	cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
+	spec->dig_in = 0x09;
+	cfg->dig_in_pin = 0x0e;
+	cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 }
 
 static void ca0132_init_chip(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 1436118..5e22a8f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2967,12 +2967,10 @@
 };
 
 static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
-	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
 	SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
-	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
@@ -2988,14 +2986,10 @@
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
-	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
-	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
-	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
-	SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 69b9284..8f23374 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -877,8 +877,6 @@
 	struct hdmi_eld *eld;
 	struct hdmi_spec_per_cvt *per_cvt = NULL;
 
-	hinfo->nid = 0; /* clear the leftover value */
-
 	/* Validate hinfo */
 	pin_idx = hinfo_to_pin_index(spec, hinfo);
 	if (snd_BUG_ON(pin_idx < 0))
@@ -1163,6 +1161,14 @@
 	return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
 
+static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+					     struct hda_codec *codec,
+					     struct snd_pcm_substream *substream)
+{
+	snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+	return 0;
+}
+
 static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
 			  struct hda_codec *codec,
 			  struct snd_pcm_substream *substream)
@@ -1202,6 +1208,7 @@
 	.open = hdmi_pcm_open,
 	.close = hdmi_pcm_close,
 	.prepare = generic_hdmi_playback_pcm_prepare,
+	.cleanup = generic_hdmi_playback_pcm_cleanup,
 };
 
 static int generic_hdmi_build_pcms(struct hda_codec *codec)
@@ -1220,7 +1227,6 @@
 		pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
 		pstr->substreams = 1;
 		pstr->ops = generic_ops;
-		pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */
 		/* other pstr fields are set in open */
 	}
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 344b221..4f81dd4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6099,6 +6099,8 @@
 	[ALC269_FIXUP_PCM_44K] = {
 		.type = ALC_FIXUP_FUNC,
 		.v.func = alc269_fixup_pcm_44k,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_QUANTA_MUTE
 	},
 	[ALC269_FIXUP_STEREO_DMIC] = {
 		.type = ALC_FIXUP_FUNC,
@@ -6206,9 +6208,11 @@
 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
-	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
-	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 
 #if 0
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 94040cc..ea5775a 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4272,7 +4272,8 @@
 	unsigned int gpio;
 	int i;
 
-	snd_hda_sequence_write(codec, spec->init);
+	if (spec->init)
+		snd_hda_sequence_write(codec, spec->init);
 
 	/* power down adcs initially */
 	if (spec->powerdown_adcs)
@@ -5748,7 +5749,6 @@
 		/* fallthru */
 	case 0x111d76b4: /* 6 Port without Analog Mixer */
 	case 0x111d76b5:
-		spec->init = stac92hd71bxx_core_init;
 		codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
 		spec->num_dmics = stac92xx_connected_ports(codec,
 					stac92hd71bxx_dmic_nids,
@@ -5773,7 +5773,6 @@
 			spec->stream_delay = 40; /* 40 milliseconds */
 
 		/* disable VSW */
-		spec->init = stac92hd71bxx_core_init;
 		unmute_init++;
 		snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
 		snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
@@ -5788,7 +5787,6 @@
 
 		/* fallthru */
 	default:
-		spec->init = stac92hd71bxx_core_init;
 		codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
 		spec->num_dmics = stac92xx_connected_ports(codec,
 					stac92hd71bxx_dmic_nids,
@@ -5796,6 +5794,9 @@
 		break;
 	}
 
+	if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB)
+		spec->init = stac92hd71bxx_core_init;
+
 	if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
 		snd_hda_sequence_write_cache(codec, unmute_init);
 
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 80d90cb..4307717 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1752,6 +1752,14 @@
 {
 	struct via_spec *spec = codec->spec;
 	vt1708_stop_hp_work(spec);
+
+	if (spec->codec_type == VT1802) {
+		/* Fix pop noise on headphones */
+		int i;
+		for (i = 0; i < spec->autocfg.hp_outs; i++)
+			snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0);
+	}
+
 	return 0;
 }
 #endif
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index d1ab437..5579b08 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -851,6 +851,8 @@
 	/* hardcoded device name & channel count */
 	err = snd_pcm_new(chip->card, (char *)card_name, 0,
 			  1, 1, &pcm);
+	if (err < 0)
+		return err;
 
 	pcm->private_data = chip;
 
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index b8ac871..b12308b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6585,7 +6585,7 @@
 		snd_printk(KERN_ERR "HDSPM: "
 				"unable to kmalloc Mixer memory of %d Bytes\n",
 				(int)sizeof(struct hdspm_mixer));
-		return err;
+		return -ENOMEM;
 	}
 
 	hdspm->port_names_in = NULL;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 512434e..805ab6e 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1377,8 +1377,9 @@
 	if (rc)
 		goto error_out_cleanup;
 
-	if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
-			sis)) {
+	rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
+			 sis);
+	if (rc) {
 		dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq);
 		goto error_out_cleanup;
 	}
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index f5ceb6f..210cafe 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -143,7 +143,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int snd_pmac_driver_suspend(struct device *dev)
 {
 	struct snd_card *card = dev_get_drvdata(dev);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 1aa52ef..9b18b52 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -1040,6 +1040,7 @@
 				   GFP_KERNEL);
 	if (!the_card.null_buffer_start_vaddr) {
 		pr_info("%s: nullbuffer alloc failed\n", __func__);
+		ret = -ENOMEM;
 		goto clean_preallocate;
 	}
 	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c
index 318c5ba5..dfb7443 100644
--- a/sound/soc/blackfin/bf6xx-sport.c
+++ b/sound/soc/blackfin/bf6xx-sport.c
@@ -413,7 +413,14 @@
 
 void sport_delete(struct sport_device *sport)
 {
+	if (sport->tx_desc)
+		dma_free_coherent(NULL, sport->tx_desc_size,
+				sport->tx_desc, 0);
+	if (sport->rx_desc)
+		dma_free_coherent(NULL, sport->rx_desc_size,
+				sport->rx_desc, 0);
 	sport_free_resource(sport);
+	kfree(sport);
 }
 EXPORT_SYMBOL(sport_delete);
 
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 3c79592..23b4018 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2406,6 +2406,10 @@
 
 	/* Setup AB8500 according to board-settings */
 	pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
+
+	/* Inform SoC Core that we have our own I/O arrangements. */
+	codec->control_data = (void *)true;
+
 	status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
 	if (status < 0) {
 		pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 8c39ddd..11b1b71 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -186,6 +186,7 @@
 
 	printk(KERN_INFO "AD1980 SoC Audio Codec\n");
 
+	codec->control_data = codec;	/* we don't use regmap! */
 	ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
 	if (ret < 0) {
 		printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 6276e35..8f726c0 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -581,6 +581,8 @@
 {
 	struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
 
+	codec->control_data = priv->mc13xxx;
+
 	mc13xxx_lock(priv->mc13xxx);
 
 	/* these are the reset values */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8af6a52..df2f99d 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -239,6 +239,7 @@
 	{"Headphone Mux", "DAC", "DAC"},	/* dac --> hp_mux */
 	{"LO", NULL, "DAC"},			/* dac --> line_out */
 
+	{"LINE_IN", NULL, "VAG_POWER"},
 	{"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
 	{"HP", NULL, "Headphone Mux"},		/* hp_mux --> hp */
 
@@ -1357,8 +1358,6 @@
 	if (ret)
 		goto err;
 
-	snd_soc_dapm_new_widgets(&codec->dapm);
-
 	return 0;
 
 err:
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 982e437..33c0f3d 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -340,6 +340,7 @@
 
 	printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
 
+	codec->control_data = codec;	/* we don't use regmap! */
 	ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
 	if (ret < 0)
 		goto codec_err;
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 6537f16..e33d327 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -128,13 +128,9 @@
 
 ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),
 
 SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
 		   ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
-SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5,
-		   ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),
 
 ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
@@ -236,8 +232,6 @@
 
 ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);
 
 ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
@@ -349,10 +343,6 @@
 		 NULL, 0),
 SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
 		 NULL, 0),
-SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0,
-		 NULL, 0),
-SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0,
-		 NULL, 0),
 
 SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
 		 NULL, 0),
@@ -466,8 +456,6 @@
 
 ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
 ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
-ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"),
-ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),
 
 ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
 ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
@@ -553,8 +541,6 @@
 	{ name, "EQ4", "EQ4" }, \
 	{ name, "DRC1L", "DRC1L" }, \
 	{ name, "DRC1R", "DRC1R" }, \
-	{ name, "DRC2L", "DRC2L" }, \
-	{ name, "DRC2R", "DRC2R" }, \
 	{ name, "LHPF1", "LHPF1" }, \
 	{ name, "LHPF2", "LHPF2" }, \
 	{ name, "LHPF3", "LHPF3" }, \
@@ -639,6 +625,15 @@
 	{ "AIF2 Capture", NULL, "SYSCLK" },
 	{ "AIF3 Capture", NULL, "SYSCLK" },
 
+	{ "IN1L PGA", NULL, "IN1L" },
+	{ "IN1R PGA", NULL, "IN1R" },
+
+	{ "IN2L PGA", NULL, "IN2L" },
+	{ "IN2R PGA", NULL, "IN2R" },
+
+	{ "IN3L PGA", NULL, "IN3L" },
+	{ "IN3R PGA", NULL, "IN3R" },
+
 	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
 	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
 	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -675,8 +670,6 @@
 
 	ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
 	ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
-	ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"),
-	ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),
 
 	ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
 	ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 8033f70..01ebbcc 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -681,6 +681,18 @@
 	{ "AIF2 Capture", NULL, "SYSCLK" },
 	{ "AIF3 Capture", NULL, "SYSCLK" },
 
+	{ "IN1L PGA", NULL, "IN1L" },
+	{ "IN1R PGA", NULL, "IN1R" },
+
+	{ "IN2L PGA", NULL, "IN2L" },
+	{ "IN2R PGA", NULL, "IN2R" },
+
+	{ "IN3L PGA", NULL, "IN3L" },
+	{ "IN3R PGA", NULL, "IN3R" },
+
+	{ "IN4L PGA", NULL, "IN4L" },
+	{ "IN4R PGA", NULL, "IN4R" },
+
 	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
 	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
 	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index eaf6586..ce67200 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2501,6 +2501,9 @@
 		/* VMID 2*250k */
 		snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
 				    WM8962_VMID_SEL_MASK, 0x100);
+
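+		/* Coming up from BIAS_OFF: give VMID time to ramp */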
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+			msleep(100);
 		break;
 
 	case SND_SOC_BIAS_OFF:
@@ -3730,21 +3733,6 @@
 
 	regcache_sync(wm8962->regmap);
 
-	regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
-			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA,
-			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA);
-
-	/* Bias enable at 2*50k for ramp */
-	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
-			   WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA,
-			   WM8962_BIAS_ENA | 0x180);
-
-	msleep(5);
-
-	/* VMID back to 2x250k for standby */
-	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
-			   WM8962_VMID_SEL_MASK, 0x100);
-
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index bb62f4b..6c9eeca 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2649,7 +2649,7 @@
 		return -EINVAL;
 	}
 
-	bclk_rate = params_rate(params) * 2;
+	bclk_rate = params_rate(params) * 4;
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		bclk_rate *= 16;
@@ -3253,10 +3253,13 @@
 	int ret;
 	int report;
 
+	pm_runtime_get_sync(dev);
+
 	ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg);
 	if (ret < 0) {
 		dev_err(dev, "Failed to read microphone status: %d\n",
 			ret);
+		pm_runtime_put(dev);
 		return;
 	}
 
@@ -3299,6 +3302,8 @@
 
 	snd_soc_jack_report(priv->micdet[1].jack, report,
 			    SND_JACK_HEADSET | SND_JACK_BTN_0);
+
+	pm_runtime_put(dev);
 }
 
 static irqreturn_t wm8994_mic_irq(int irq, void *data)
@@ -3421,12 +3426,15 @@
 	int reg;
 	bool present;
 
+	pm_runtime_get_sync(codec->dev);
+
 	mutex_lock(&wm8994->accdet_lock);
 
 	reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
 	if (reg < 0) {
 		dev_err(codec->dev, "Failed to read jack status: %d\n", reg);
 		mutex_unlock(&wm8994->accdet_lock);
+		pm_runtime_put(codec->dev);
 		return IRQ_NONE;
 	}
 
@@ -3491,6 +3499,7 @@
 				    SND_JACK_MECHANICAL | SND_JACK_HEADSET |
 				    wm8994->btn_mask);
 
+	pm_runtime_put(codec->dev);
 	return IRQ_HANDLED;
 }
 
@@ -3602,6 +3611,8 @@
 	if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
 		return IRQ_HANDLED;
 
+	pm_runtime_get_sync(codec->dev);
+
 	/* We may occasionally read a detection without an impedance
 	 * range being provided - if that happens, loop again.
 	 */
@@ -3612,6 +3623,7 @@
 			dev_err(codec->dev,
 				"Failed to read mic detect status: %d\n",
 				reg);
+			pm_runtime_put(codec->dev);
 			return IRQ_NONE;
 		}
 
@@ -3639,6 +3651,7 @@
 		dev_warn(codec->dev, "Accessory detection with no callback\n");
 
 out:
+	pm_runtime_put(codec->dev);
 	return IRQ_HANDLED;
 }
 
@@ -4025,6 +4038,8 @@
 		break;
 	case WM8958:
 		if (wm8994->revision < 1) {
+			snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+						ARRAY_SIZE(wm8994_intercon));
 			snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
 						ARRAY_SIZE(wm8994_revd_intercon));
 			snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 099e6ec..c6d2076 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -148,7 +148,7 @@
 
 SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
 SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
-SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
+SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0),
 SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
 
 SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv),
@@ -272,7 +272,7 @@
 
 /* Mic select */
 static const struct snd_kcontrol_new wm9712_mic_src_controls =
-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
+SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
 
 /* diff select */
 static const struct snd_kcontrol_new wm9712_diff_sel_controls =
@@ -291,7 +291,9 @@
 	&wm9712_capture_selectl_controls),
 SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
 	&wm9712_capture_selectr_controls),
-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
+SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
+	&wm9712_mic_src_controls),
+SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
 	&wm9712_mic_src_controls),
 SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
 	&wm9712_diff_sel_controls),
@@ -319,6 +321,7 @@
 SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
+SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
 SND_SOC_DAPM_OUTPUT("MONOOUT"),
 SND_SOC_DAPM_OUTPUT("HPOUTL"),
@@ -379,6 +382,18 @@
 	{"Mic PGA", NULL, "MIC1"},
 	{"Mic PGA", NULL, "MIC2"},
 
+	/* microphones */
+	{"Differential Mic", NULL, "MIC1"},
+	{"Differential Mic", NULL, "MIC2"},
+	{"Left Mic Select Source", "Mic 1", "MIC1"},
+	{"Left Mic Select Source", "Mic 2", "MIC2"},
+	{"Left Mic Select Source", "Stereo", "MIC1"},
+	{"Left Mic Select Source", "Differential", "Differential Mic"},
+	{"Right Mic Select Source", "Mic 1", "MIC1"},
+	{"Right Mic Select Source", "Mic 2", "MIC2"},
+	{"Right Mic Select Source", "Stereo", "MIC2"},
+	{"Right Mic Select Source", "Differential", "Differential Mic"},
+
 	/* left capture selector */
 	{"Left Capture Select", "Mic", "MIC1"},
 	{"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
@@ -619,6 +634,7 @@
 {
 	int ret = 0;
 
+	codec->control_data = codec;	/* we don't use regmap! */
 	ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
 	if (ret < 0) {
 		printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 3eb19fb..d0b8a32 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1196,6 +1196,7 @@
 	if (wm9713 == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, wm9713);
+	codec->control_data = wm9713;	/* we don't use regmap! */
 
 	ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
 	if (ret < 0)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 95441bf..ce5e5cd 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -380,14 +380,20 @@
 static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
 {
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (dev->txnumevt)	/* enable FIFO */
+		if (dev->txnumevt) {	/* enable FIFO */
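+			/* clear first so the FIFO is re-enabled from a known state */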
+			mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+								FIFO_ENABLE);
 			mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
 								FIFO_ENABLE);
+		}
 		mcasp_start_tx(dev);
 	} else {
-		if (dev->rxnumevt)	/* enable FIFO */
+		if (dev->rxnumevt) {	/* enable FIFO */
+			mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+								FIFO_ENABLE);
 			mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
 								FIFO_ENABLE);
+		}
 		mcasp_start_rx(dev);
 	}
 }
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 28dd76c..81d7728 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -380,13 +380,14 @@
 static struct snd_soc_dai_driver imx_ssi_dai = {
 	.probe = imx_ssi_dai_probe,
 	.playback = {
-		.channels_min = 1,
+		/* The SSI does not support monaural audio. */
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
-		.channels_min = 1,
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig
index 99a997f..b6fa776 100644
--- a/sound/soc/mxs/Kconfig
+++ b/sound/soc/mxs/Kconfig
@@ -10,7 +10,7 @@
 if SND_MXS_SOC
 
 config SND_SOC_MXS_SGTL5000
-	tristate "SoC Audio support for i.MX boards with sgtl5000"
+	tristate "SoC Audio support for MXS boards with sgtl5000"
 	depends on I2C
 	select SND_SOC_SGTL5000
 	help
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index aba71bf..b303071 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -394,9 +394,14 @@
 			     struct snd_soc_dai *cpu_dai)
 {
 	struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+	struct mxs_saif *master_saif;
 	u32 scr, stat;
 	int ret;
 
+	master_saif = mxs_saif_get_master(saif);
+	if (!master_saif)
+		return -EINVAL;
+
 	/* mclk should already be set */
 	if (!saif->mclk && saif->mclk_in_use) {
 		dev_err(cpu_dai->dev, "set mclk first\n");
@@ -420,6 +425,25 @@
 		return ret;
 	}
 
+	/* prepare clk in hw_param, enable in trigger */
+	clk_prepare(saif->clk);
+	if (saif != master_saif) {
+		/*
+		 * Set an initial clock rate for the saif internal logic to work
+		 * properly. This is important when working in EXTMASTER mode
+		 * that uses the other saif's BITCLK&LRCLK but it still needs a
+		 * basic clock which should be fast enough for the internal
+		 * logic.
+		 */
+		clk_enable(saif->clk);
+		ret = clk_set_rate(saif->clk, 24000000);
+		clk_disable(saif->clk);
+		if (ret)
+			return ret;
+
+		clk_prepare(master_saif->clk);
+	}
+
 	scr = __raw_readl(saif->base + SAIF_CTRL);
 
 	scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 34835e8..d33c48b 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -745,7 +745,7 @@
 {
 	const char *signal, *src;
 
-	if (mcbsp->pdata->mux_signal)
+	if (!mcbsp->pdata->mux_signal)
 		return -EINVAL;
 
 	switch (mux) {
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 1046083..acdd3ef 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -820,3 +820,4 @@
 MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
 MODULE_DESCRIPTION("OMAP I2S SoC Interface");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-mcbsp");
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 5a649da..f0feb06 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -441,3 +441,4 @@
 MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
 MODULE_DESCRIPTION("OMAP PCM DMA module");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-pcm-audio");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index b7b2a1f..89b0646 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -20,7 +20,7 @@
 #include <sound/pcm_params.h>
 
 #include <plat/audio.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
 
 #include "dma.h"
 #include "pcm.h"
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f219b2f..c501af6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -826,7 +826,7 @@
 	}
 
 	if (!rtd->cpu_dai) {
-		dev_dbg(card->dev, "CPU DAI %s not registered\n",
+		dev_err(card->dev, "CPU DAI %s not registered\n",
 			dai_link->cpu_dai_name);
 		return -EPROBE_DEFER;
 	}
@@ -857,14 +857,14 @@
 		}
 
 		if (!rtd->codec_dai) {
-			dev_dbg(card->dev, "CODEC DAI %s not registered\n",
+			dev_err(card->dev, "CODEC DAI %s not registered\n",
 				dai_link->codec_dai_name);
 			return -EPROBE_DEFER;
 		}
 	}
 
 	if (!rtd->codec) {
-		dev_dbg(card->dev, "CODEC %s not registered\n",
+		dev_err(card->dev, "CODEC %s not registered\n",
 			dai_link->codec_name);
 		return -EPROBE_DEFER;
 	}
@@ -888,7 +888,7 @@
 		rtd->platform = platform;
 	}
 	if (!rtd->platform) {
-		dev_dbg(card->dev, "platform %s not registered\n",
+		dev_err(card->dev, "platform %s not registered\n",
 			dai_link->platform_name);
 		return -EPROBE_DEFER;
 	}
@@ -1096,7 +1096,7 @@
 	}
 
 	/* If the driver didn't set I/O up try regmap */
-	if (!codec->control_data)
+	if (!codec->write && dev_get_regmap(codec->dev, NULL))
 		snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
 
 	if (driver->controls)
@@ -1481,6 +1481,8 @@
 			return 0;
 	}
 
+	dev_err(card->dev, "%s not registered\n", aux_dev->codec_name);
+
 	return -EPROBE_DEFER;
 }
 
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 7f8b3b7..0c17293 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -103,7 +103,7 @@
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
-	blocking_notifier_call_chain(&jack->notifier, status, jack);
+	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
 
 	snd_soc_dapm_sync(dapm);
 
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index d684df2..e463529 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -177,7 +177,7 @@
 	}
 
 	alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
-	if (alc5632->gpio_hp_det == -ENODEV)
+	if (alc5632->gpio_hp_det == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	ret = snd_soc_of_parse_card_name(card, "nvidia,model");
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0c5bb33..d4f14e4 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -284,27 +284,27 @@
 	} else if (np) {
 		pdata->gpio_spkr_en = of_get_named_gpio(np,
 						"nvidia,spkr-en-gpios", 0);
-		if (pdata->gpio_spkr_en == -ENODEV)
+		if (pdata->gpio_spkr_en == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		pdata->gpio_hp_mute = of_get_named_gpio(np,
 						"nvidia,hp-mute-gpios", 0);
-		if (pdata->gpio_hp_mute == -ENODEV)
+		if (pdata->gpio_hp_mute == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		pdata->gpio_hp_det = of_get_named_gpio(np,
 						"nvidia,hp-det-gpios", 0);
-		if (pdata->gpio_hp_det == -ENODEV)
+		if (pdata->gpio_hp_det == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		pdata->gpio_int_mic_en = of_get_named_gpio(np,
 						"nvidia,int-mic-en-gpios", 0);
-		if (pdata->gpio_int_mic_en == -ENODEV)
+		if (pdata->gpio_int_mic_en == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		pdata->gpio_ext_mic_en = of_get_named_gpio(np,
 						"nvidia,ext-mic-en-gpios", 0);
-		if (pdata->gpio_ext_mic_en == -ENODEV)
+		if (pdata->gpio_ext_mic_en == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 	}
 
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 62ac028..057e28e 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -21,7 +21,7 @@
 #include <linux/mfd/dbx500-prcmu.h>
 
 #include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
 
 #include <sound/soc.h>
 #include <sound/soc-dai.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index ee14d2d..5c472f3 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 
 #include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
 
 #include <sound/soc.h>
 
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
index 7f71b4a..2d9136d 100644
--- a/sound/soc/ux500/ux500_msp_i2s.h
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -17,7 +17,7 @@
 
 #include <linux/platform_device.h>
 
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
 
 #define MSP_INPUT_FREQ_APB 48000000
 
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 0f647d2..c411812 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -821,10 +821,6 @@
 	if (++ep->use_count != 1)
 		return 0;
 
-	/* just to be sure */
-	deactivate_urbs(ep, 0, 1);
-	wait_clear_urbs(ep);
-
 	ep->active_mask = 0;
 	ep->unlink_mask = 0;
 	ep->phase = 0;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a1298f3..62ec808 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -544,6 +544,9 @@
 	subs->last_frame_number = 0;
 	runtime->delay = 0;
 
+	/* clear the pending deactivation on the target EPs */
+	deactivate_endpoints(subs);
+
 	/* for playback, submit the URBs now; otherwise, the first hwptr_done
 	 * updates for all URBs would happen at the same time when starting */
 	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 77f124f..35655c3 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -319,6 +319,8 @@
 LIB_H += util/cgroup.h
 LIB_H += $(TRACE_EVENT_DIR)event-parse.h
 LIB_H += util/target.h
+LIB_H += util/rblist.h
+LIB_H += util/intlist.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
@@ -383,6 +385,8 @@
 LIB_OBJS += $(OUTPUT)util/cpumap.o
 LIB_OBJS += $(OUTPUT)util/cgroup.o
 LIB_OBJS += $(OUTPUT)util/target.o
+LIB_OBJS += $(OUTPUT)util/rblist.o
+LIB_OBJS += $(OUTPUT)util/intlist.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 
@@ -983,7 +987,8 @@
 	$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
 	$(MAKE) -C Documentation/ clean
 	$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
-	$(RM) $(OUTPUT)util/*-{bison,flex}*
+	$(RM) $(OUTPUT)util/*-bison*
+	$(RM) $(OUTPUT)util/*-flex*
 	$(python-clean)
 
 .PHONY: all install clean strip $(LIBTRACEEVENT)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f5a6452..4db6e1b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -313,7 +313,7 @@
 		}
  	}
 
-	perf_session__update_sample_type(session);
+	perf_session__set_id_hdr_size(session);
 }
 
 static int process_buildids(struct perf_record *rec)
@@ -844,8 +844,6 @@
 	struct perf_record *rec = &record;
 	char errbuf[BUFSIZ];
 
-	perf_header__set_cmdline(argc, argv);
-
 	evsel_list = perf_evlist__new(NULL, NULL);
 	if (evsel_list == NULL)
 		return -ENOMEM;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 69b1c11..7c88a24 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -249,8 +249,9 @@
 static int perf_report__setup_sample_type(struct perf_report *rep)
 {
 	struct perf_session *self = rep->session;
+	u64 sample_type = perf_evlist__sample_type(self->evlist);
 
-	if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+	if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
 		if (sort__has_parent) {
 			ui__error("Selected --sort parent, but no "
 				    "callchain data. Did you call "
@@ -274,7 +275,7 @@
 
 	if (sort__branch_mode == 1) {
 		if (!self->fd_pipe &&
-		    !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
 			ui__error("Selected -b but no branch data. "
 				  "Did you call perf record without -b?\n");
 			return -1;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index d909eb7..1d592f5 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -478,7 +478,6 @@
 	unsigned int nr_events[nsyscalls],
 		     expected_nr_events[nsyscalls], i, j;
 	struct perf_evsel *evsels[nsyscalls], *evsel;
-	int sample_size = __perf_evsel__sample_size(attr.sample_type);
 
 	for (i = 0; i < nsyscalls; ++i) {
 		char name[64];
@@ -563,8 +562,7 @@
 			goto out_munmap;
 		}
 
-		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
-					       false, &sample, false);
+		err = perf_evlist__parse_sample(evlist, event, &sample, false);
 		if (err) {
 			pr_err("Can't parse sample, err = %d\n", err);
 			goto out_munmap;
@@ -661,12 +659,12 @@
 	const char *cmd = "sleep";
 	const char *argv[] = { cmd, "1", NULL, };
 	char *bname;
-	u64 sample_type, prev_time = 0;
+	u64 prev_time = 0;
 	bool found_cmd_mmap = false,
 	     found_libc_mmap = false,
 	     found_vdso_mmap = false,
 	     found_ld_mmap = false;
-	int err = -1, errs = 0, i, wakeups = 0, sample_size;
+	int err = -1, errs = 0, i, wakeups = 0;
 	u32 cpu;
 	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
 
@@ -757,13 +755,6 @@
 	}
 
 	/*
-	 * We'll need these two to parse the PERF_SAMPLE_* fields in each
-	 * event.
-	 */
-	sample_type = perf_evlist__sample_type(evlist);
-	sample_size = __perf_evsel__sample_size(sample_type);
-
-	/*
 	 * Now that all is properly set up, enable the events, they will
 	 * count just on workload.pid, which will start...
 	 */
@@ -788,9 +779,7 @@
 				if (type < PERF_RECORD_MAX)
 					nr_events[type]++;
 
-				err = perf_event__parse_sample(event, sample_type,
-							       sample_size, true,
-							       &sample, false);
+				err = perf_evlist__parse_sample(evlist, event, &sample, false);
 				if (err < 0) {
 					if (verbose)
 						perf_event__fprintf(event, stderr);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 35e86c6..68cd61e 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -38,6 +38,7 @@
 #include "util/cpumap.h"
 #include "util/xyarray.h"
 #include "util/sort.h"
+#include "util/intlist.h"
 
 #include "util/debug.h"
 
@@ -706,8 +707,16 @@
 	int err;
 
 	if (!machine && perf_guest) {
-		pr_err("Can't find guest [%d]'s kernel information\n",
-			event->ip.pid);
+		static struct intlist *seen;
+
+		if (!seen)
+			seen = intlist__new();
+
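+		/* Print the warning only once per unknown guest pid */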
+		if (!intlist__has_entry(seen, event->ip.pid)) {
+			pr_err("Can't find guest [%d]'s kernel information\n",
+				event->ip.pid);
+			intlist__add(seen, event->ip.pid);
+		}
 		return;
 	}
 
@@ -811,7 +820,7 @@
 	int ret;
 
 	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
-		ret = perf_session__parse_sample(session, event, &sample);
+		ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
 			continue;
@@ -943,8 +952,10 @@
 			 * based cpu-clock-tick sw counter, which
 			 * is always available even if no PMU support:
 			 */
-			if (attr->type == PERF_TYPE_HARDWARE &&
-			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+			if ((err == ENOENT || err == ENXIO) &&
+			    (attr->type == PERF_TYPE_HARDWARE) &&
+			    (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
+
 				if (verbose)
 					ui__warning("Cycles event not supported,\n"
 						    "trying to fall back to cpu-clock-ticks\n");
@@ -1032,7 +1043,7 @@
 					       &top->session->host_machine);
 	perf_top__start_counters(top);
 	top->session->evlist = top->evlist;
-	perf_session__update_sample_type(top->session);
+	perf_session__set_id_hdr_size(top->session);
 
 	/* Wait for a minimal set of events before starting the snapshot */
 	poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1b19728..d84870b 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -197,9 +197,6 @@
 
 const char *perf_event__name(unsigned int id);
 
-int perf_event__parse_sample(const union perf_event *event, u64 type,
-			     int sample_size, bool sample_id_all,
-			     struct perf_sample *sample, bool swapped);
 int perf_event__synthesize_sample(union perf_event *event, u64 type,
 				  const struct perf_sample *sample,
 				  bool swapped);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3edfd34..9b38681 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -881,3 +881,10 @@
 
 	return 0;
 }
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+			      struct perf_sample *sample, bool swapped)
+{
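+	/* Assume all evsels share the same sample layout and use the first one */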
+	struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
+	return perf_evsel__parse_sample(e, event, sample, swapped);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 40d4d3c..528c1ac 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -122,6 +122,9 @@
 bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
 u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
 
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+			      struct perf_sample *sample, bool swapped);
+
 bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
 bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e817713..2eaae14 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -20,7 +20,7 @@
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
 
-int __perf_evsel__sample_size(u64 sample_type)
+static int __perf_evsel__sample_size(u64 sample_type)
 {
 	u64 mask = sample_type & PERF_SAMPLE_MASK;
 	int size = 0;
@@ -53,6 +53,7 @@
 	evsel->attr	   = *attr;
 	INIT_LIST_HEAD(&evsel->node);
 	hists__init(&evsel->hists);
+	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
 }
 
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -728,10 +729,10 @@
 	return false;
 }
 
-int perf_event__parse_sample(const union perf_event *event, u64 type,
-			     int sample_size, bool sample_id_all,
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			     struct perf_sample *data, bool swapped)
 {
+	u64 type = evsel->attr.sample_type;
 	const u64 *array;
 
 	/*
@@ -746,14 +747,14 @@
 	data->period = 1;
 
 	if (event->header.type != PERF_RECORD_SAMPLE) {
-		if (!sample_id_all)
+		if (!evsel->attr.sample_id_all)
 			return 0;
 		return perf_event__parse_id_sample(event, type, data, swapped);
 	}
 
 	array = event->sample.array;
 
-	if (sample_size + sizeof(event->header) > event->header.size)
+	if (evsel->sample_size + sizeof(event->header) > event->header.size)
 		return -EFAULT;
 
 	if (type & PERF_SAMPLE_IP) {
@@ -895,7 +896,7 @@
 		u.val32[1] = sample->tid;
 		if (swapped) {
 			/*
-			 * Inverse of what is done in perf_event__parse_sample
+			 * Inverse of what is done in perf_evsel__parse_sample
 			 */
 			u.val32[0] = bswap_32(u.val32[0]);
 			u.val32[1] = bswap_32(u.val32[1]);
@@ -930,7 +931,7 @@
 		u.val32[0] = sample->cpu;
 		if (swapped) {
 			/*
-			 * Inverse of what is done in perf_event__parse_sample
+			 * Inverse of what is done in perf_evsel__parse_sample
 			 */
 			u.val32[0] = bswap_32(u.val32[0]);
 			u.val64 = bswap_64(u.val64);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 67cc503..b559929 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -65,6 +65,7 @@
 		void		*func;
 		void		*data;
 	} handler;
+	unsigned int		sample_size;
 	bool 			supported;
 };
 
@@ -177,13 +178,8 @@
 	return __perf_evsel__read(evsel, ncpus, nthreads, true);
 }
 
-int __perf_evsel__sample_size(u64 sample_type);
-
-static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
-{
-	return __perf_evsel__sample_size(evsel->attr.sample_type);
-}
-
 void hists__init(struct hists *hists);
 
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+			     struct perf_sample *sample, bool swapped);
 #endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3a6d204..74ea3c2f 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -174,6 +174,15 @@
 {
 	int i;
 
+	/*
+	 * If header_argv has already been set, do not override it.
+	 * This allows a command to set the cmdline, parse args and
+	 * then call another builtin function that implements a
+	 * command -- e.g., cmd_kvm calling cmd_record.
+	 */
+	if (header_argv)
+		return 0;
+
 	header_argc = (u32)argc;
 
 	/* do not include NULL termination */
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
new file mode 100644
index 0000000..fd530dc
--- /dev/null
+++ b/tools/perf/util/intlist.c
@@ -0,0 +1,101 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+
+#include "intlist.h"
+
+static struct rb_node *intlist__node_new(struct rblist *rblist __used,
+					 const void *entry)
+{
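+	/* The int key travels through the generic rblist API as an opaque pointer */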
+	int i = (int)((long)entry);
+	struct rb_node *rc = NULL;
+	struct int_node *node = malloc(sizeof(*node));
+
+	if (node != NULL) {
+		node->i = i;
+		rc = &node->rb_node;
+	}
+
+	return rc;
+}
+
+static void int_node__delete(struct int_node *ilist)
+{
+	free(ilist);
+}
+
+static void intlist__node_delete(struct rblist *rblist __used,
+				 struct rb_node *rb_node)
+{
+	struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+	int_node__delete(node);
+}
+
+static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+	int i = (int)((long)entry);
+	struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+	return node->i - i;
+}
+
+int intlist__add(struct intlist *ilist, int i)
+{
+	return rblist__add_node(&ilist->rblist, (void *)((long)i));
+}
+
+void intlist__remove(struct intlist *ilist, struct int_node *node)
+{
+	rblist__remove_node(&ilist->rblist, &node->rb_node);
+}
+
+struct int_node *intlist__find(struct intlist *ilist, int i)
+{
+	struct int_node *node = NULL;
+	struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+
+	if (rb_node)
+		node = container_of(rb_node, struct int_node, rb_node);
+
+	return node;
+}
+
+struct intlist *intlist__new(void)
+{
+	struct intlist *ilist = malloc(sizeof(*ilist));
+
+	if (ilist != NULL) {
+		rblist__init(&ilist->rblist);
+		ilist->rblist.node_cmp    = intlist__node_cmp;
+		ilist->rblist.node_new    = intlist__node_new;
+		ilist->rblist.node_delete = intlist__node_delete;
+	}
+
+	return ilist;
+}
+
+void intlist__delete(struct intlist *ilist)
+{
+	if (ilist != NULL)
+		rblist__delete(&ilist->rblist);
+}
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
+{
+	struct int_node *node = NULL;
+	struct rb_node *rb_node;
+
+	rb_node = rblist__entry(&ilist->rblist, idx);
+	if (rb_node)
+		node = container_of(rb_node, struct int_node, rb_node);
+
+	return node;
+}
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
new file mode 100644
index 0000000..6d63ab9
--- /dev/null
+++ b/tools/perf/util/intlist.h
@@ -0,0 +1,75 @@
+#ifndef __PERF_INTLIST_H
+#define __PERF_INTLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+#include "rblist.h"
+
+struct int_node {
+	struct rb_node rb_node;
+	int i;
+};
+
+struct intlist {
+	struct rblist rblist;
+};
+
+struct intlist *intlist__new(void);
+void intlist__delete(struct intlist *ilist);
+
+void intlist__remove(struct intlist *ilist, struct int_node *in);
+int intlist__add(struct intlist *ilist, int i);
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
+struct int_node *intlist__find(struct intlist *ilist, int i);
+
+static inline bool intlist__has_entry(struct intlist *ilist, int i)
+{
+	return intlist__find(ilist, i) != NULL;
+}
+
+static inline bool intlist__empty(const struct intlist *ilist)
+{
+	return rblist__empty(&ilist->rblist);
+}
+
+static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
+{
+	return rblist__nr_entries(&ilist->rblist);
+}
+
+/* For intlist iteration */
+static inline struct int_node *intlist__first(struct intlist *ilist)
+{
+	struct rb_node *rn = rb_first(&ilist->rblist.entries);
+	return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+static inline struct int_node *intlist__next(struct int_node *in)
+{
+	struct rb_node *rn;
+	if (!in)
+		return NULL;
+	rn = rb_next(&in->rb_node);
+	return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+
+/**
+ * intlist__for_each     - iterate over an intlist
+ * @pos:	the &struct int_node to use as a loop cursor.
+ * @ilist:	the &struct intlist for loop.
+ */
+#define intlist__for_each(pos, ilist)	\
+	for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
+
+/**
+ * intlist__for_each_safe - iterate over an intlist safe against removal of
+ *                         int_node
+ * @pos:	the &struct int_node to use as a loop cursor.
+ * @n:		another &struct int_node to use as temporary storage.
+ * @ilist:	the &struct intlist for loop.
+ */
+#define intlist__for_each_safe(pos, n, ilist)	\
+	for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
+	     pos = n, n = intlist__next(n))
+#endif /* __PERF_INTLIST_H */
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 1b997d2..127d648 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -13,6 +13,9 @@
 	} \
 } while (0)
 
+#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+			     PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
 static int test__checkevent_tracepoint(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -21,8 +24,7 @@
 	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
 	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
 	TEST_ASSERT_VAL("wrong sample_type",
-		(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
-		evsel->attr.sample_type);
+		PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
 	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
 	return 0;
 }
@@ -37,8 +39,7 @@
 		TEST_ASSERT_VAL("wrong type",
 			PERF_TYPE_TRACEPOINT == evsel->attr.type);
 		TEST_ASSERT_VAL("wrong sample_type",
-			(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
-			== evsel->attr.sample_type);
+			PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
 		TEST_ASSERT_VAL("wrong sample_period",
 			1 == evsel->attr.sample_period);
 	}
@@ -428,8 +429,7 @@
 	evsel = list_entry(evsel->node.next, struct perf_evsel, node);
 	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
 	TEST_ASSERT_VAL("wrong sample_type",
-		(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
-		evsel->attr.sample_type);
+		PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
 	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
 	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
 	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 99d02aa..594f8fa 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -1,6 +1,7 @@
 #include "util.h"
 #include "parse-options.h"
 #include "cache.h"
+#include "header.h"
 
 #define OPT_SHORT 1
 #define OPT_UNSET 2
@@ -413,6 +414,8 @@
 {
 	struct parse_opt_ctx_t ctx;
 
+	perf_header__set_cmdline(argc, argv);
+
 	parse_options_start(&ctx, argc, argv, flags);
 	switch (parse_options_step(&ctx, options, usagestr)) {
 	case PARSE_OPT_HELP:
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index e03b58a..0688bfb 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -797,17 +797,13 @@
 
 	event = perf_evlist__mmap_read(evlist, cpu);
 	if (event != NULL) {
-		struct perf_evsel *first;
 		PyObject *pyevent = pyrf_event__new(event);
 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
 
 		if (pyevent == NULL)
 			return PyErr_NoMemory();
 
-		first = list_entry(evlist->entries.next, struct perf_evsel, node);
-		err = perf_event__parse_sample(event, first->attr.sample_type,
-					       perf_evsel__sample_size(first),
-					       sample_id_all, &pevent->sample, false);
+		err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
 		if (err)
 			return PyErr_Format(PyExc_OSError,
 					    "perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
new file mode 100644
index 0000000..0171fb6
--- /dev/null
+++ b/tools/perf/util/rblist.c
@@ -0,0 +1,107 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rblist.h"
+
+int rblist__add_node(struct rblist *rblist, const void *new_entry)
+{
+	struct rb_node **p = &rblist->entries.rb_node;
+	struct rb_node *parent = NULL, *new_node;
+
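+	/* Walk down the tree using the caller-supplied node_cmp() to find the slot */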
+	while (*p != NULL) {
+		int rc;
+
+		parent = *p;
+
+		rc = rblist->node_cmp(parent, new_entry);
+		if (rc > 0)
+			p = &(*p)->rb_left;
+		else if (rc < 0)
+			p = &(*p)->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	new_node = rblist->node_new(rblist, new_entry);
+	if (new_node == NULL)
+		return -ENOMEM;
+
+	rb_link_node(new_node, parent, p);
+	rb_insert_color(new_node, &rblist->entries);
+	++rblist->nr_entries;
+
+	return 0;
+}
+
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
+{
+	rb_erase(rb_node, &rblist->entries);
+	--rblist->nr_entries;
+	rblist->node_delete(rblist, rb_node);
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+	struct rb_node **p = &rblist->entries.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*p != NULL) {
+		int rc;
+
+		parent = *p;
+
+		rc = rblist->node_cmp(parent, entry);
+		if (rc > 0)
+			p = &(*p)->rb_left;
+		else if (rc < 0)
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	return NULL;
+}
+
+void rblist__init(struct rblist *rblist)
+{
+	if (rblist != NULL) {
+		rblist->entries	 = RB_ROOT;
+		rblist->nr_entries = 0;
+	}
+
+	return;
+}
+
+void rblist__delete(struct rblist *rblist)
+{
+	if (rblist != NULL) {
+		struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+		while (next) {
+			pos = next;
+			next = rb_next(pos);
+			rb_erase(pos, &rblist->entries);
+			rblist->node_delete(rblist, pos);
+		}
+		free(rblist);
+	}
+}
+
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
+{
+	struct rb_node *node;
+
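+	/* In-order walk: return the idx-th node in sorted order */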
+	for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+		if (!idx--)
+			return node;
+	}
+
+	return NULL;
+}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
new file mode 100644
index 0000000..6d0cae5
--- /dev/null
+++ b/tools/perf/util/rblist.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_RBLIST_H
+#define __PERF_RBLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+/*
+ * create node structs of the form:
+ * struct my_node {
+ *     struct rb_node rb_node;
+ *     ... my data ...
+ * };
+ *
+ * create list structs of the form:
+ * struct mylist {
+ *     struct rblist rblist;
+ *     ... my data ...
+ * };
+ */
+
+struct rblist {
+	struct rb_root entries;
+	unsigned int   nr_entries;
+
+	int (*node_cmp)(struct rb_node *rbn, const void *entry);
+	struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
+	void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
+};
+
+void rblist__init(struct rblist *rblist);
+void rblist__delete(struct rblist *rblist);
+int rblist__add_node(struct rblist *rblist, const void *new_entry);
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
+
+static inline bool rblist__empty(const struct rblist *rblist)
+{
+	return rblist->nr_entries == 0;
+}
+
+static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
+{
+	return rblist->nr_entries;
+}
+
+#endif /* __PERF_RBLIST_H */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8e4f075..2437fb0 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -80,14 +80,12 @@
 	return -1;
 }
 
-void perf_session__update_sample_type(struct perf_session *self)
+void perf_session__set_id_hdr_size(struct perf_session *session)
 {
-	self->sample_type = perf_evlist__sample_type(self->evlist);
-	self->sample_size = __perf_evsel__sample_size(self->sample_type);
-	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
-	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
-	self->host_machine.id_hdr_size = self->id_hdr_size;
-	machines__set_id_hdr_size(&self->machines, self->id_hdr_size);
+	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
+
+	session->host_machine.id_hdr_size = id_hdr_size;
+	machines__set_id_hdr_size(&session->machines, id_hdr_size);
 }
 
 int perf_session__create_kernel_maps(struct perf_session *self)
@@ -147,7 +145,7 @@
 	if (mode == O_RDONLY) {
 		if (perf_session__open(self, force) < 0)
 			goto out_delete;
-		perf_session__update_sample_type(self);
+		perf_session__set_id_hdr_size(self);
 	} else if (mode == O_WRONLY) {
 		/*
 		 * In O_RDONLY mode this will be performed when reading the
@@ -158,7 +156,7 @@
 	}
 
 	if (tool && tool->ordering_requires_timestamps &&
-	    tool->ordered_samples && !self->sample_id_all) {
+	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
 		tool->ordered_samples = false;
 	}
@@ -673,7 +671,8 @@
 		if (iter->timestamp > limit)
 			break;
 
-		ret = perf_session__parse_sample(s, iter->event, &sample);
+		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
+						s->header.needs_swap);
 		if (ret)
 			pr_err("Can't parse sample, err = %d\n", ret);
 		else
@@ -865,16 +864,18 @@
 				       union perf_event *event,
 				       struct perf_sample *sample)
 {
+	u64 sample_type = perf_evlist__sample_type(session->evlist);
+
 	if (event->header.type != PERF_RECORD_SAMPLE &&
-	    !session->sample_id_all) {
+	    !perf_evlist__sample_id_all(session->evlist)) {
 		fputs("-1 -1 ", stdout);
 		return;
 	}
 
-	if ((session->sample_type & PERF_SAMPLE_CPU))
+	if ((sample_type & PERF_SAMPLE_CPU))
 		printf("%u ", sample->cpu);
 
-	if (session->sample_type & PERF_SAMPLE_TIME)
+	if (sample_type & PERF_SAMPLE_TIME)
 		printf("%" PRIu64 " ", sample->time);
 }
 
@@ -899,6 +900,8 @@
 static void dump_sample(struct perf_session *session, union perf_event *event,
 			struct perf_sample *sample)
 {
+	u64 sample_type;
+
 	if (!dump_trace)
 		return;
 
@@ -906,10 +909,12 @@
 	       event->header.misc, sample->pid, sample->tid, sample->ip,
 	       sample->period, sample->addr);
 
-	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
+	sample_type = perf_evlist__sample_type(session->evlist);
+
+	if (sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(sample);
 
-	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
+	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
 		branch_stack__printf(sample);
 }
 
@@ -1006,7 +1011,7 @@
 					   union perf_event *event, struct perf_sample *sample)
 {
 	if (event->header.type != PERF_RECORD_SAMPLE ||
-	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
+	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
 		return 0;
 
 	if (!ip_callchain__valid(sample->callchain, event)) {
@@ -1030,7 +1035,7 @@
 	case PERF_RECORD_HEADER_ATTR:
 		err = tool->attr(event, &session->evlist);
 		if (err == 0)
-			perf_session__update_sample_type(session);
+			perf_session__set_id_hdr_size(session);
 		return err;
 	case PERF_RECORD_HEADER_EVENT_TYPE:
 		return tool->event_type(tool, event);
@@ -1065,7 +1070,7 @@
 	int ret;
 
 	if (session->header.needs_swap)
-		event_swap(event, session->sample_id_all);
+		event_swap(event, perf_evlist__sample_id_all(session->evlist));
 
 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
 		return -EINVAL;
@@ -1078,7 +1083,8 @@
 	/*
 	 * For all kernel events we get the sample data
 	 */
-	ret = perf_session__parse_sample(session, event, &sample);
+	ret = perf_evlist__parse_sample(session->evlist, event, &sample,
+					session->header.needs_swap);
 	if (ret)
 		return ret;
 
@@ -1389,9 +1395,9 @@
 	return err;
 }
 
-bool perf_session__has_traces(struct perf_session *self, const char *msg)
+bool perf_session__has_traces(struct perf_session *session, const char *msg)
 {
-	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
 		return false;
 	}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7c435bd..1f7ec87 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -41,13 +41,9 @@
 	 *	  perf.data file.
 	 */
 	struct hists		hists;
-	u64			sample_type;
-	int			sample_size;
 	int			fd;
 	bool			fd_pipe;
 	bool			repipe;
-	bool			sample_id_all;
-	u16			id_hdr_size;
 	int			cwdlen;
 	char			*cwd;
 	struct ordered_samples	ordered_samples;
@@ -86,7 +82,7 @@
 
 int perf_session__create_kernel_maps(struct perf_session *self);
 
-void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__set_id_hdr_size(struct perf_session *session);
 void perf_session__remove_thread(struct perf_session *self, struct thread *th);
 
 static inline
@@ -130,24 +126,6 @@
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
 
-static inline int perf_session__parse_sample(struct perf_session *session,
-					     const union perf_event *event,
-					     struct perf_sample *sample)
-{
-	return perf_event__parse_sample(event, session->sample_type,
-					session->sample_size,
-					session->sample_id_all, sample,
-					session->header.needs_swap);
-}
-
-static inline int perf_session__synthesize_sample(struct perf_session *session,
-						  union perf_event *event,
-						  const struct perf_sample *sample)
-{
-	return perf_event__synthesize_sample(event, session->sample_type,
-					     sample, session->header.needs_swap);
-}
-
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 					    unsigned int type);
 
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 6783a20..95856ff 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -10,23 +10,28 @@
 #include <stdlib.h>
 #include <string.h>
 
-static struct str_node *str_node__new(const char *s, bool dupstr)
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
 {
-	struct str_node *self = malloc(sizeof(*self));
+	const char *s = entry;
+	struct rb_node *rc = NULL;
+	struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = malloc(sizeof(*snode));
 
-	if (self != NULL) {
-		if (dupstr) {
+	if (snode != NULL) {
+		if (strlist->dupstr) {
 			s = strdup(s);
 			if (s == NULL)
 				goto out_delete;
 		}
-		self->s = s;
+		snode->s = s;
+		rc = &snode->rb_node;
 	}
 
-	return self;
+	return rc;
 
 out_delete:
-	free(self);
+	free(snode);
 	return NULL;
 }
 
@@ -37,36 +42,26 @@
 	free(self);
 }
 
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
+{
+	struct strlist *slist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+	str_node__delete(snode, slist->dupstr);
+}
+
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+	const char *str = entry;
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+	return strcmp(snode->s, str);
+}
+
 int strlist__add(struct strlist *self, const char *new_entry)
 {
-	struct rb_node **p = &self->entries.rb_node;
-	struct rb_node *parent = NULL;
-	struct str_node *sn;
-
-	while (*p != NULL) {
-		int rc;
-
-		parent = *p;
-		sn = rb_entry(parent, struct str_node, rb_node);
-		rc = strcmp(sn->s, new_entry);
-
-		if (rc > 0)
-			p = &(*p)->rb_left;
-		else if (rc < 0)
-			p = &(*p)->rb_right;
-		else
-			return -EEXIST;
-	}
-
-	sn = str_node__new(new_entry, self->dupstr);
-	if (sn == NULL)
-		return -ENOMEM;
-
-	rb_link_node(&sn->rb_node, parent, p);
-	rb_insert_color(&sn->rb_node, &self->entries);
-	++self->nr_entries;
-
-	return 0;
+	return rblist__add_node(&self->rblist, new_entry);
 }
 
 int strlist__load(struct strlist *self, const char *filename)
@@ -96,34 +91,20 @@
 	return err;
 }
 
-void strlist__remove(struct strlist *self, struct str_node *sn)
+void strlist__remove(struct strlist *slist, struct str_node *snode)
 {
-	rb_erase(&sn->rb_node, &self->entries);
-	str_node__delete(sn, self->dupstr);
+	rblist__remove_node(&slist->rblist, &snode->rb_node);
 }
 
-struct str_node *strlist__find(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
 {
-	struct rb_node **p = &self->entries.rb_node;
-	struct rb_node *parent = NULL;
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
 
-	while (*p != NULL) {
-		struct str_node *sn;
-		int rc;
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
 
-		parent = *p;
-		sn = rb_entry(parent, struct str_node, rb_node);
-		rc = strcmp(sn->s, entry);
-
-		if (rc > 0)
-			p = &(*p)->rb_left;
-		else if (rc < 0)
-			p = &(*p)->rb_right;
-		else
-			return sn;
-	}
-
-	return NULL;
+	return snode;
 }
 
 static int strlist__parse_list_entry(struct strlist *self, const char *s)
@@ -156,9 +137,12 @@
 	struct strlist *self = malloc(sizeof(*self));
 
 	if (self != NULL) {
-		self->entries	 = RB_ROOT;
+		rblist__init(&self->rblist);
+		self->rblist.node_cmp    = strlist__node_cmp;
+		self->rblist.node_new    = strlist__node_new;
+		self->rblist.node_delete = strlist__node_delete;
+
 		self->dupstr	 = dupstr;
-		self->nr_entries = 0;
 		if (slist && strlist__parse_list(self, slist) != 0)
 			goto out_error;
 	}
@@ -171,30 +155,18 @@
 
 void strlist__delete(struct strlist *self)
 {
-	if (self != NULL) {
-		struct str_node *pos;
-		struct rb_node *next = rb_first(&self->entries);
-
-		while (next) {
-			pos = rb_entry(next, struct str_node, rb_node);
-			next = rb_next(&pos->rb_node);
-			strlist__remove(self, pos);
-		}
-		self->entries = RB_ROOT;
-		free(self);
-	}
+	if (self != NULL)
+		rblist__delete(&self->rblist);
 }
 
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
 {
-	struct rb_node *nd;
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node;
 
-	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
-		struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+	rb_node = rblist__entry(&slist->rblist, idx);
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
 
-		if (!idx--)
-			return pos;
-	}
-
-	return NULL;
+	return snode;
 }
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 3ba8390..dd9f922 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -4,14 +4,15 @@
 #include <linux/rbtree.h>
 #include <stdbool.h>
 
+#include "rblist.h"
+
 struct str_node {
 	struct rb_node rb_node;
 	const char     *s;
 };
 
 struct strlist {
-	struct rb_root entries;
-	unsigned int   nr_entries;
+	struct rblist rblist;
 	bool	       dupstr;
 };
 
@@ -32,18 +33,18 @@
 
 static inline bool strlist__empty(const struct strlist *self)
 {
-	return self->nr_entries == 0;
+	return rblist__empty(&self->rblist);
 }
 
 static inline unsigned int strlist__nr_entries(const struct strlist *self)
 {
-	return self->nr_entries;
+	return rblist__nr_entries(&self->rblist);
 }
 
 /* For strlist iteration */
 static inline struct str_node *strlist__first(struct strlist *self)
 {
-	struct rb_node *rn = rb_first(&self->entries);
+	struct rb_node *rn = rb_first(&self->rblist.entries);
 	return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
 }
 static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fdad4eee..8b63b67 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -64,7 +64,7 @@
 	DSO_BINARY_TYPE__NOT_FOUND,
 };
 
-#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab)
+#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
 
 static enum dso_binary_type binary_type_data[] = {
 	DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -72,7 +72,7 @@
 	DSO_BINARY_TYPE__NOT_FOUND,
 };
 
-#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data)
+#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data)
 
 int dso__name_len(const struct dso *dso)
 {
@@ -2875,6 +2875,7 @@
 	int i, items = 0;
 	char path[PATH_MAX];
 	pid_t pid;
+	char *endp;
 
 	if (symbol_conf.default_guest_vmlinux_name ||
 	    symbol_conf.default_guest_modules ||
@@ -2891,7 +2892,14 @@
 				/* Filter out . and .. */
 				continue;
 			}
-			pid = atoi(namelist[i]->d_name);
+			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+			if ((*endp != '\0') ||
+			    (endp == namelist[i]->d_name) ||
+			    (errno == ERANGE)) {
+				pr_debug("invalid directory (%s). Skipping.\n",
+					 namelist[i]->d_name);
+				continue;
+			}
 			sprintf(path, "%s/%s/proc/kallsyms",
 				symbol_conf.guestmount,
 				namelist[i]->d_name);
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 3f59c49..051eaa6 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -110,7 +110,7 @@
 	int idx;
 	const char *msg;
 
-	BUG_ON(buflen > 0);
+	BUG_ON(buflen == 0);
 
 	if (errnum >= 0) {
 		const char *err = strerror_r(errnum, buf, buflen);