Merge branch 'net-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vxy/lksctp-dev

Add missing linux/vmalloc.h include to net/sctp/probe.c

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 53326fe..ab26bbc 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.0.8"
-#define DRV_MODULE_RELDATE	"Feb 15, 2010"
+#define DRV_MODULE_VERSION	"2.0.9"
+#define DRV_MODULE_RELDATE	"April 27, 2010"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@
 }
 
 static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 {
-	bnx2_cnic_stop(bp);
+	if (stop_cnic)
+		bnx2_cnic_stop(bp);
 	if (netif_running(bp->dev)) {
 		int i;
 
@@ -671,14 +672,15 @@
 }
 
 static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
-			bnx2_cnic_start(bp);
+			if (start_cnic)
+				bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -4758,8 +4760,12 @@
 		rc = bnx2_alloc_bad_rbuf(bp);
 	}
 
-	if (bp->flags & BNX2_FLAG_USING_MSIX)
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 		bnx2_setup_msix_tbl(bp);
+		/* Prevent MSIX table reads and writes from timing out */
+		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+	}
 
 	return rc;
 }
@@ -6272,12 +6278,12 @@
 		return;
 	}
 
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 
 	bnx2_init_nic(bp, 1);
 
 	atomic_set(&bp->intr_sem, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
 	rtnl_unlock();
 }
 
@@ -6319,7 +6325,7 @@
 	struct bnx2 *bp = netdev_priv(dev);
 
 	if (netif_running(dev))
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, false);
 
 	bp->vlgrp = vlgrp;
 
@@ -6330,7 +6336,7 @@
 	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, false);
 }
 #endif
 
@@ -7050,9 +7056,9 @@
 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	if (netif_running(bp->dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_init_nic(bp, 0);
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 	}
 
 	return 0;
@@ -7082,7 +7088,7 @@
 		/* Reset will erase chipset stats; save them */
 		bnx2_save_stats(bp);
 
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
 		bnx2_free_skbs(bp);
 		bnx2_free_mem(bp);
@@ -7110,7 +7116,7 @@
 			bnx2_setup_cnic_irq_info(bp);
 		mutex_unlock(&bp->cnic_lock);
 #endif
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 	}
 	return 0;
 }
@@ -7363,7 +7369,7 @@
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		int i;
 
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
 		bnx2_free_skbs(bp);
 
@@ -7382,7 +7388,7 @@
 			bnx2_shutdown_chip(bp);
 		else {
 			bnx2_init_nic(bp, 1);
-			bnx2_netif_start(bp);
+			bnx2_netif_start(bp, true);
 		}
 
 		/* wait for link up */
@@ -8376,7 +8382,7 @@
 		return 0;
 
 	flush_scheduled_work();
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 	netif_device_detach(dev);
 	del_timer_sync(&bp->timer);
 	bnx2_shutdown_chip(bp);
@@ -8398,7 +8404,7 @@
 	bnx2_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 	bnx2_init_nic(bp, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
 	return 0;
 }
 
@@ -8425,7 +8431,7 @@
 	}
 
 	if (netif_running(dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		del_timer_sync(&bp->timer);
 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
 	}
@@ -8482,7 +8488,7 @@
 
 	rtnl_lock();
 	if (netif_running(dev))
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 
 	netif_device_attach(dev);
 	rtnl_unlock();
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 904bd6b..d13760d 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -5020,6 +5020,9 @@
 	reg16 &= ~state;
 	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 
+	if (!pdev->bus->self)
+		return;
+
 	pos = pci_pcie_cap(pdev->bus->self);
 	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
 	reg16 &= ~state;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0cef967..5267c27 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1567,9 +1567,9 @@
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
 		gfar_write(&regs->dmactrl, tempval);
 
-		while (!(gfar_read(&regs->ievent) &
-			 (IEVENT_GRSC | IEVENT_GTSC)))
-			cpu_relax();
+		spin_event_timeout(((gfar_read(&regs->ievent) &
+			 (IEVENT_GRSC | IEVENT_GTSC)) ==
+			 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
 	}
 }
 
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ec6bcc0..79c35ae 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -104,10 +104,6 @@
 #define MAX_EMULATION_MAC_ADDRS         16
 #define VMDQ_P(p)   ((p) + adapter->num_vfs)
 
-#define IXGBE_SUBDEV_ID_82598AF_MEZZ		0x0049
-#define IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ	0x004a
-#define IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ	0x004b
-
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ff59f88..2ae5a51 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4314,9 +4314,6 @@
 	int err = 0;
 	int vector, v_budget;
 
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
-		goto try_msi;
-
 	/*
 	 * It's easy to be greedy for MSI-X vectors, but it really
 	 * doesn't do us much good if we have a lot more vectors
@@ -4348,7 +4345,7 @@
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 			goto out;
 	}
-try_msi:
+
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -4629,18 +4626,6 @@
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-	adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
-	if (adapter->hw.device_id == IXGBE_DEV_ID_82598AF_DUAL_PORT) {
-		switch (adapter->hw.subsystem_device_id) {
-		case IXGBE_SUBDEV_ID_82598AF_MEZZ:
-		case IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ:
-		case IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ:
-			adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
-			break;
-		default:
-			break;
-		}
-	}
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index d97e1fd..1c4110d 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,7 @@
 struct macvtap_queue {
 	struct sock sk;
 	struct socket sock;
+	struct socket_wq wq;
 	struct macvlan_dev *vlan;
 	struct file *file;
 	unsigned int flags;
@@ -242,12 +243,15 @@
 
 static void macvtap_sock_write_space(struct sock *sk)
 {
+	wait_queue_head_t *wqueue;
+
 	if (!sock_writeable(sk) ||
 	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 		return;
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND);
+	wqueue = sk_sleep(sk);
+	if (wqueue && waitqueue_active(wqueue))
+		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
 }
 
 static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +276,8 @@
 	if (!q)
 		goto out;
 
-	init_waitqueue_head(&q->sock.wait);
+	q->sock.wq = &q->wq;
+	init_waitqueue_head(&q->wq.wait);
 	q->sock.type = SOCK_RAW;
 	q->sock.state = SS_CONNECTED;
 	q->sock.file = file;
@@ -308,7 +313,7 @@
 		goto out;
 
 	mask = 0;
-	poll_wait(file, &q->sock.wait, wait);
+	poll_wait(file, &q->wq.wait, wait);
 
 	if (!skb_queue_empty(&q->sk.sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 408f3d7..949ac1a 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1804,23 +1804,30 @@
     SMC_SELECT_BANK(1);
     media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
 
+    SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irqrestore(&smc->lock, flags);
+
     /* Check for pending interrupt with watchdog flag set: with
        this, we can limp along even if the interrupt is blocked */
     if (smc->watchdog++ && ((i>>8) & i)) {
 	if (!smc->fast_poll)
 	    printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+	local_irq_save(flags);
 	smc_interrupt(dev->irq, dev);
+	local_irq_restore(flags);
 	smc->fast_poll = HZ;
     }
     if (smc->fast_poll) {
 	smc->fast_poll--;
 	smc->media.expires = jiffies + HZ/100;
 	add_timer(&smc->media);
-	SMC_SELECT_BANK(saved_bank);
-	spin_unlock_irqrestore(&smc->lock, flags);
 	return;
     }
 
+    spin_lock_irqsave(&smc->lock, flags);
+
+    saved_bank = inw(ioaddr + BANK_SELECT);
+
     if (smc->cfg & CFG_MII_SELECT) {
 	if (smc->mii_if.phy_id < 0)
 	    goto reschedule;
@@ -1978,15 +1985,16 @@
 	unsigned int ioaddr = dev->base_addr;
 	u16 saved_bank = inw(ioaddr + BANK_SELECT);
 	int ret;
+	unsigned long flags;
 
-	spin_lock_irq(&smc->lock);
+	spin_lock_irqsave(&smc->lock, flags);
 	SMC_SELECT_BANK(3);
 	if (smc->cfg & CFG_MII_SELECT)
 		ret = mii_ethtool_gset(&smc->mii_if, ecmd);
 	else
 		ret = smc_netdev_get_ecmd(dev, ecmd);
 	SMC_SELECT_BANK(saved_bank);
-	spin_unlock_irq(&smc->lock);
+	spin_unlock_irqrestore(&smc->lock, flags);
 	return ret;
 }
 
@@ -1996,15 +2004,16 @@
 	unsigned int ioaddr = dev->base_addr;
 	u16 saved_bank = inw(ioaddr + BANK_SELECT);
 	int ret;
+	unsigned long flags;
 
-	spin_lock_irq(&smc->lock);
+	spin_lock_irqsave(&smc->lock, flags);
 	SMC_SELECT_BANK(3);
 	if (smc->cfg & CFG_MII_SELECT)
 		ret = mii_ethtool_sset(&smc->mii_if, ecmd);
 	else
 		ret = smc_netdev_set_ecmd(dev, ecmd);
 	SMC_SELECT_BANK(saved_bank);
-	spin_unlock_irq(&smc->lock);
+	spin_unlock_irqrestore(&smc->lock, flags);
 	return ret;
 }
 
@@ -2014,12 +2023,13 @@
 	unsigned int ioaddr = dev->base_addr;
 	u16 saved_bank = inw(ioaddr + BANK_SELECT);
 	u32 ret;
+	unsigned long flags;
 
-	spin_lock_irq(&smc->lock);
+	spin_lock_irqsave(&smc->lock, flags);
 	SMC_SELECT_BANK(3);
 	ret = smc_link_ok(dev);
 	SMC_SELECT_BANK(saved_bank);
-	spin_unlock_irq(&smc->lock);
+	spin_unlock_irqrestore(&smc->lock, flags);
 	return ret;
 }
 
@@ -2056,16 +2066,17 @@
 	int rc = 0;
 	u16 saved_bank;
 	unsigned int ioaddr = dev->base_addr;
+	unsigned long flags;
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_irq(&smc->lock);
+	spin_lock_irqsave(&smc->lock, flags);
 	saved_bank = inw(ioaddr + BANK_SELECT);
 	SMC_SELECT_BANK(3);
 	rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
 	SMC_SELECT_BANK(saved_bank);
-	spin_unlock_irq(&smc->lock);
+	spin_unlock_irqrestore(&smc->lock, flags);
 	return rc;
 }
 
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fc5938ba..a527e37 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -88,6 +88,11 @@
 	---help---
 	  Supports the LSI ET1011C PHY.
 
+config MICREL_PHY
+	tristate "Driver for Micrel PHYs"
+	---help---
+	  Supports the KSZ9021, VSC8201, KS8001 PHYs.
+
 config FIXED_PHY
 	bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
 	depends on PHYLIB=y
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 1342585..13bebab 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,4 +20,5 @@
 obj-$(CONFIG_MDIO_GPIO)		+= mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)	+= national.o
 obj-$(CONFIG_STE10XP)		+= ste10Xp.o
+obj-$(CONFIG_MICREL_PHY)	+= micrel.o
 obj-$(CONFIG_MDIO_OCTEON)	+= mdio-octeon.o
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
new file mode 100644
index 0000000..68dd107
--- /dev/null
+++ b/drivers/net/phy/micrel.c
@@ -0,0 +1,113 @@
+/*
+ * drivers/net/phy/micrel.c
+ *
+ * Driver for Micrel PHYs
+ *
+ * Author: David J. Choi
+ *
+ * Copyright (c) 2010 Micrel, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Support : ksz9021 , vsc8201, ks8001
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define	PHY_ID_KSZ9021			0x00221611
+#define	PHY_ID_VSC8201			0x000FC413
+#define	PHY_ID_KS8001			0x0022161A
+
+
+static int kszphy_config_init(struct phy_device *phydev)
+{
+	return 0;
+}
+
+
+static struct phy_driver ks8001_driver = {
+	.phy_id		= PHY_ID_KS8001,
+	.phy_id_mask	= 0x00fffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_POLL,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver vsc8201_driver = {
+	.phy_id		= PHY_ID_VSC8201,
+	.name		= "Micrel VSC8201",
+	.phy_id_mask	= 0x00fffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_POLL,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver ksz9021_driver = {
+	.phy_id		= PHY_ID_KSZ9021,
+	.phy_id_mask	= 0x000fff10,
+	.name		= "Micrel KSZ9021 Gigabit PHY",
+	.features	= PHY_GBIT_FEATURES | SUPPORTED_Pause,
+	.flags		= PHY_POLL,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver		= { .owner = THIS_MODULE, },
+};
+
+static int __init ksphy_init(void)
+{
+	int ret;
+
+	ret = phy_driver_register(&ks8001_driver);
+	if (ret)
+		goto err1;
+	ret = phy_driver_register(&vsc8201_driver);
+	if (ret)
+		goto err2;
+
+	ret = phy_driver_register(&ksz9021_driver);
+	if (ret)
+		goto err3;
+	return 0;
+
+err3:
+	phy_driver_unregister(&vsc8201_driver);
+err2:
+	phy_driver_unregister(&ks8001_driver);
+err1:
+	return ret;
+}
+
+static void __exit ksphy_exit(void)
+{
+	phy_driver_unregister(&ks8001_driver);
+	phy_driver_unregister(&vsc8201_driver);
+	phy_driver_unregister(&ksz9021_driver);
+}
+
+module_init(ksphy_init);
+module_exit(ksphy_exit);
+
+MODULE_DESCRIPTION("Micrel PHY driver");
+MODULE_AUTHOR("David J. Choi");
+MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+	{ PHY_ID_KSZ9021, 0x000fff10 },
+	{ PHY_ID_VSC8201, 0x00fffff0 },
+	{ PHY_ID_KS8001, 0x00fffff0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 35f1953..5441688 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -405,6 +405,7 @@
 	DECLARE_WAITQUEUE(wait, current);
 	ssize_t ret;
 	struct sk_buff *skb = NULL;
+	struct iovec iov;
 
 	ret = count;
 
@@ -448,7 +449,9 @@
 	if (skb->len > count)
 		goto outf;
 	ret = -EFAULT;
-	if (copy_to_user(buf, skb->data, skb->len))
+	iov.iov_base = buf;
+	iov.iov_len = count;
+	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
 		goto outf;
 	ret = skb->len;
 
@@ -1567,13 +1570,22 @@
 	struct channel *pch = chan->ppp;
 	int proto;
 
-	if (!pch || skb->len == 0) {
+	if (!pch) {
 		kfree_skb(skb);
 		return;
 	}
 
-	proto = PPP_PROTO(skb);
 	read_lock_bh(&pch->upl);
+	if (!pskb_may_pull(skb, 2)) {
+		kfree_skb(skb);
+		if (pch->ppp) {
+			++pch->ppp->dev->stats.rx_length_errors;
+			ppp_receive_error(pch->ppp);
+		}
+		goto done;
+	}
+
+	proto = PPP_PROTO(skb);
 	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
 		/* put it on the channel queue */
 		skb_queue_tail(&pch->file.rq, skb);
@@ -1585,6 +1597,8 @@
 	} else {
 		ppp_do_recv(pch->ppp, skb, pch);
 	}
+
+done:
 	read_unlock_bh(&pch->upl);
 }
 
@@ -1617,7 +1631,8 @@
 static void
 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 {
-	if (pskb_may_pull(skb, 2)) {
+	/* note: a 0-length skb is used as an error indication */
+	if (skb->len > 0) {
 #ifdef CONFIG_PPP_MULTILINK
 		/* XXX do channel-level decompression here */
 		if (PPP_PROTO(skb) == PPP_MP)
@@ -1625,15 +1640,10 @@
 		else
 #endif /* CONFIG_PPP_MULTILINK */
 			ppp_receive_nonmp_frame(ppp, skb);
-		return;
+	} else {
+		kfree_skb(skb);
+		ppp_receive_error(ppp);
 	}
-
-	if (skb->len > 0)
-		/* note: a 0-length skb is used as an error indication */
-		++ppp->dev->stats.rx_length_errors;
-
-	kfree_skb(skb);
-	ppp_receive_error(ppp);
 }
 
 static void
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index ba4770a..fec3c29 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2256,17 +2256,36 @@
 
 	sc->mii_bus = mdiobus_alloc();
 	if (sc->mii_bus == NULL) {
-		sbmac_uninitctx(sc);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto uninit_ctx;
 	}
 
+	sc->mii_bus->name = sbmac_mdio_string;
+	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+	sc->mii_bus->priv = sc;
+	sc->mii_bus->read = sbmac_mii_read;
+	sc->mii_bus->write = sbmac_mii_write;
+	sc->mii_bus->irq = sc->phy_irq;
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+
+	sc->mii_bus->parent = &pldev->dev;
+	/*
+	 * Probe PHY address
+	 */
+	err = mdiobus_register(sc->mii_bus);
+	if (err) {
+		printk(KERN_ERR "%s: unable to register MDIO bus\n",
+		       dev->name);
+		goto free_mdio;
+	}
+	dev_set_drvdata(&pldev->dev, sc->mii_bus);
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR "%s.%d: unable to register netdev\n",
 		       sbmac_string, idx);
-		mdiobus_free(sc->mii_bus);
-		sbmac_uninitctx(sc);
-		return err;
+		goto unreg_mdio;
 	}
 
 	pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
@@ -2282,19 +2301,15 @@
 	pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
 	       dev->name, base, eaddr);
 
-	sc->mii_bus->name = sbmac_mdio_string;
-	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
-	sc->mii_bus->priv = sc;
-	sc->mii_bus->read = sbmac_mii_read;
-	sc->mii_bus->write = sbmac_mii_write;
-	sc->mii_bus->irq = sc->phy_irq;
-	for (i = 0; i < PHY_MAX_ADDR; ++i)
-		sc->mii_bus->irq[i] = SBMAC_PHY_INT;
-
-	sc->mii_bus->parent = &pldev->dev;
-	dev_set_drvdata(&pldev->dev, sc->mii_bus);
-
 	return 0;
+unreg_mdio:
+	mdiobus_unregister(sc->mii_bus);
+	dev_set_drvdata(&pldev->dev, NULL);
+free_mdio:
+	mdiobus_free(sc->mii_bus);
+uninit_ctx:
+	sbmac_uninitctx(sc);
+	return err;
 }
 
 
@@ -2320,16 +2335,6 @@
 		goto out_err;
 	}
 
-	/*
-	 * Probe PHY address
-	 */
-	err = mdiobus_register(sc->mii_bus);
-	if (err) {
-		printk(KERN_ERR "%s: unable to register MDIO bus\n",
-		       dev->name);
-		goto out_unirq;
-	}
-
 	sc->sbm_speed = sbmac_speed_none;
 	sc->sbm_duplex = sbmac_duplex_none;
 	sc->sbm_fc = sbmac_fc_none;
@@ -2360,11 +2365,7 @@
 	return 0;
 
 out_unregister:
-	mdiobus_unregister(sc->mii_bus);
-
-out_unirq:
 	free_irq(dev->irq, dev);
-
 out_err:
 	return err;
 }
@@ -2553,9 +2554,6 @@
 
 	phy_disconnect(sc->phy_dev);
 	sc->phy_dev = NULL;
-
-	mdiobus_unregister(sc->mii_bus);
-
 	free_irq(dev->irq, dev);
 
 	sbdma_emptyring(&(sc->sbm_txdma));
@@ -2662,6 +2660,7 @@
 
 	unregister_netdev(dev);
 	sbmac_uninitctx(sc);
+	mdiobus_unregister(sc->mii_bus);
 	mdiobus_free(sc->mii_bus);
 	iounmap(sc->sbm_base);
 	free_netdev(dev);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index bc75ef6..1564605 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1870,6 +1870,7 @@
 	}
 
 	if (disabled) {
+		dev_close(efx->net_dev);
 		EFX_ERR(efx, "has been disabled\n");
 		efx->state = STATE_DISABLED;
 	} else {
@@ -1893,8 +1894,7 @@
 	}
 
 	rtnl_lock();
-	if (efx_reset(efx, efx->reset_pending))
-		dev_close(efx->net_dev);
+	(void)efx_reset(efx, efx->reset_pending);
 	rtnl_unlock();
 }
 
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index f7df24d..655b697 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1326,7 +1326,9 @@
 
 	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
 
-	falcon_probe_board(efx, board_rev);
+	rc = falcon_probe_board(efx, board_rev);
+	if (rc)
+		goto fail2;
 
 	kfree(nvconfig);
 	return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fdd..c7a933a 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@
 	},
 };
 
-static const struct falcon_board_type falcon_dummy_board = {
-	.init		= efx_port_dummy_op_int,
-	.init_phy	= efx_port_dummy_op_void,
-	.fini		= efx_port_dummy_op_void,
-	.set_id_led	= efx_port_dummy_op_set_id_led,
-	.monitor	= efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 {
 	struct falcon_board *board = falcon_board(efx);
 	u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@
 			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
 			 ? board->type->ref_model : board->type->gen_type,
 			 'A' + board->major, board->minor);
+		return 0;
 	} else {
 		EFX_ERR(efx, "unknown board type %d\n", type_id);
-		board->type = &falcon_dummy_board;
+		return -ENODEV;
 	}
 }
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 5825f37..bbc2c0c 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -158,7 +158,7 @@
  **************************************************************************
  */
 
-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 7bf93fa..727b422 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -475,8 +475,17 @@
 
 static void siena_update_nic_stats(struct efx_nic *efx)
 {
-	while (siena_try_update_nic_stats(efx) == -EAGAIN)
-		cpu_relax();
+	int retry;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+	 * up to 10ms for it to finish (typically takes <500us) */
+	for (retry = 0; retry < 100; ++retry) {
+		if (siena_try_update_nic_stats(efx) == 0)
+			return;
+		udelay(100);
+	}
+
+	/* Use the old values instead */
 }
 
 static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 20a1793..e525a6c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,7 +109,7 @@
 
 	struct tap_filter       txflt;
 	struct socket		socket;
-
+	struct socket_wq	wq;
 #ifdef TUN_DEBUG
 	int debug;
 #endif
@@ -323,7 +323,7 @@
 	/* Inform the methods they need to stop using the dev.
 	 */
 	if (tfile) {
-		wake_up_all(&tun->socket.wait);
+		wake_up_all(&tun->wq.wait);
 		if (atomic_dec_and_test(&tfile->count))
 			__tun_detach(tun);
 	}
@@ -398,7 +398,7 @@
 	/* Notify and wake up reader process */
 	if (tun->flags & TUN_FASYNC)
 		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
-	wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+	wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
 				   POLLRDNORM | POLLRDBAND);
 	return NETDEV_TX_OK;
 
@@ -498,7 +498,7 @@
 
 	DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
 
-	poll_wait(file, &tun->socket.wait, wait);
+	poll_wait(file, &tun->wq.wait, wait);
 
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
@@ -773,7 +773,7 @@
 
 	DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
 
-	add_wait_queue(&tun->socket.wait, &wait);
+	add_wait_queue(&tun->wq.wait, &wait);
 	while (len) {
 		current->state = TASK_INTERRUPTIBLE;
 
@@ -804,7 +804,7 @@
 	}
 
 	current->state = TASK_RUNNING;
-	remove_wait_queue(&tun->socket.wait, &wait);
+	remove_wait_queue(&tun->wq.wait, &wait);
 
 	return ret;
 }
@@ -861,6 +861,7 @@
 static void tun_sock_write_space(struct sock *sk)
 {
 	struct tun_struct *tun;
+	wait_queue_head_t *wqueue;
 
 	if (!sock_writeable(sk))
 		return;
@@ -868,8 +869,9 @@
 	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 		return;
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
+	wqueue = sk_sleep(sk);
+	if (wqueue && waitqueue_active(wqueue))
+		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
 	tun = tun_sk(sk)->tun;
@@ -1039,7 +1041,8 @@
 		if (!sk)
 			goto err_free_dev;
 
-		init_waitqueue_head(&tun->socket.wait);
+		tun->socket.wq = &tun->wq;
+		init_waitqueue_head(&tun->wq.wait);
 		tun->socket.ops = &tun_socket_ops;
 		sock_init_data(&tun->socket, sk);
 		sk->sk_write_space = tun_sock_write_space;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 63be4ca..d7b7018 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -397,4 +397,13 @@
 
 	  For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
 
+config USB_SIERRA_NET
+	tristate "USB-to-WWAN Driver for Sierra Wireless modems"
+	depends on USB_USBNET
+	help
+	  Choose this option if you have a Sierra Wireless USB-to-WWAN device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sierra_net.
+
 endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index edb09c0..b13a279 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -24,4 +24,5 @@
 obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
 obj-$(CONFIG_USB_IPHETH)	+= ipheth.o
+obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
 
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 811b2dc..b3fe0de 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -466,6 +466,7 @@
 	.bind = 	cdc_bind,
 	.unbind =	usbnet_cdc_unbind,
 	.status =	cdc_status,
+	.manage_power =	cdc_manage_power,
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 291add2..47634b6 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -240,7 +240,7 @@
 		goto out;
 
 	dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
-	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
+	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
 
 	for (i = 0; i < DM_TIMEOUT; i++) {
 		u8 tmp;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index fd10331..418825d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -122,25 +122,25 @@
 
 	tx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (tx_urb == NULL)
-		goto error;
+		goto error_nomem;
 
 	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (rx_urb == NULL)
-		goto error;
+		goto free_tx_urb;
 
 	tx_buf = usb_buffer_alloc(iphone->udev,
 				  IPHETH_BUF_SIZE,
 				  GFP_KERNEL,
 				  &tx_urb->transfer_dma);
 	if (tx_buf == NULL)
-		goto error;
+		goto free_rx_urb;
 
 	rx_buf = usb_buffer_alloc(iphone->udev,
 				  IPHETH_BUF_SIZE,
 				  GFP_KERNEL,
 				  &rx_urb->transfer_dma);
 	if (rx_buf == NULL)
-		goto error;
+		goto free_tx_buf;
 
 
 	iphone->tx_urb = tx_urb;
@@ -149,13 +149,14 @@
 	iphone->rx_buf = rx_buf;
 	return 0;
 
-error:
-	usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf,
-			rx_urb->transfer_dma);
+free_tx_buf:
 	usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
 			tx_urb->transfer_dma);
+free_rx_urb:
 	usb_free_urb(rx_urb);
+free_tx_urb:
 	usb_free_urb(tx_urb);
+error_nomem:
 	return -ENOMEM;
 }
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea..c4c334d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@
 	{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
 	{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
 	{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
+	{ USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
 	{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
 	{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
 	{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 0000000..f1942d6
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1004 @@
+/*
+ * USB-to-WWAN Driver for Sierra Wireless modems
+ *
+ * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
+ *                          <linux@sierrawireless.com>
+ *
+ * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
+ * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
+ *
+ * IMPORTANT DISCLAIMER: This driver is not commercially supported by
+ * Sierra Wireless. Use at your own risk.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#define DRIVER_VERSION "v.2.0"
+#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
+#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
+static const char driver_name[] = "sierra_net";
+
+/* if defined debug messages enabled */
+/*#define	DEBUG*/
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <asm/unaligned.h>
+#include <linux/usb/usbnet.h>
+
+#define SWI_USB_REQUEST_GET_FW_ATTR	0x06
+#define SWI_GET_FW_ATTR_MASK		0x08
+
+/* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+static	atomic_t iface_counter = ATOMIC_INIT(0);
+
+/*
+ * SYNC Timer Delay definition used to set the expiry time
+ */
+#define SIERRA_NET_SYNCDELAY (2*HZ)
+
+/* Max. MTU supported. The modem buffers are limited to 1500 */
+#define SIERRA_NET_MAX_SUPPORTED_MTU	1500
+
+/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
+ * message reception ... and thus the max. received packet.
+ * (May be the cause for parse_hip returning -EINVAL)
+ */
+#define SIERRA_NET_USBCTL_BUF_LEN	1024
+
+/* list of interface numbers - used for constructing interface lists */
+struct sierra_net_iface_info {
+	const u32 infolen;	/* number of interface numbers on list */
+	const u8  *ifaceinfo;	/* pointer to the array holding the numbers */
+};
+
+struct sierra_net_info_data {
+	u16 rx_urb_size;
+	struct sierra_net_iface_info whitelist;
+};
+
+/* Private data structure */
+struct sierra_net_data {
+
+	u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
+
+	u16 link_up;		/* air link up or down */
+	u8 tx_hdr_template[4];	/* part of HIP hdr for tx'd packets */
+
+	u8 sync_msg[4];		/* SYNC message */
+	u8 shdwn_msg[4];	/* Shutdown message */
+
+	/* Backpointer to the container */
+	struct usbnet *usbnet;
+
+	u8 ifnum;	/* interface number */
+
+/* Bit masks, must be a power of 2 */
+#define SIERRA_NET_EVENT_RESP_AVAIL    0x01
+#define SIERRA_NET_TIMER_EXPIRY        0x02
+	unsigned long kevent_flags;
+	struct work_struct sierra_net_kevent;
+	struct timer_list sync_timer; /* For retrying SYNC sequence */
+};
+
+struct param {
+	int is_present;
+	union {
+		void  *ptr;
+		u32    dword;
+		u16    word;
+		u8     byte;
+	};
+};
+
+/* HIP message type */
+#define SIERRA_NET_HIP_EXTENDEDID	0x7F
+#define SIERRA_NET_HIP_HSYNC_ID		0x60	/* Modem -> host */
+#define SIERRA_NET_HIP_RESTART_ID	0x62	/* Modem -> host */
+#define SIERRA_NET_HIP_MSYNC_ID		0x20	/* Host -> modem */
+#define SIERRA_NET_HIP_SHUTD_ID		0x26	/* Host -> modem */
+
+#define SIERRA_NET_HIP_EXT_IP_IN_ID   0x0202
+#define SIERRA_NET_HIP_EXT_IP_OUT_ID  0x0002
+
+/* 3G UMTS Link Sense Indication definitions */
+#define SIERRA_NET_HIP_LSI_UMTSID	0x78
+
+/* Reverse Channel Grant Indication HIP message */
+#define SIERRA_NET_HIP_RCGI		0x64
+
+/* LSI Protocol types */
+#define SIERRA_NET_PROTOCOL_UMTS      0x01
+/* LSI Coverage */
+#define SIERRA_NET_COVERAGE_NONE      0x00
+#define SIERRA_NET_COVERAGE_NOPACKET  0x01
+
+/* LSI Session */
+#define SIERRA_NET_SESSION_IDLE       0x00
+/* LSI Link types */
+#define SIERRA_NET_AS_LINK_TYPE_IPv4  0x00
+
+struct lsi_umts {
+	u8 protocol;
+	u8 unused1;
+	__be16 length;
+	/* eventually use a union for the rest - assume umts for now */
+	u8 coverage;
+	u8 unused2[41];
+	u8 session_state;
+	u8 unused3[33];
+	u8 link_type;
+	u8 pdp_addr_len; /* NW-supplied PDP address len */
+	u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */
+	u8 unused4[23];
+	u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
+	u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
+	u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
+	u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
+	u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
+	u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
+	u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
+	u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
+	u8 unused5[4];
+	u8 gw_addr_len; /* NW-supplied GW address len */
+	u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
+	u8 reserved[8];
+} __attribute__ ((packed));
+
+#define SIERRA_NET_LSI_COMMON_LEN      4
+#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
+	(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+
+/* Forward definitions */
+static void sierra_sync_timer(unsigned long syncdata);
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
+
+/* Our own net device operations structure */
+static const struct net_device_ops sierra_net_device_ops = {
+	.ndo_open               = usbnet_open,
+	.ndo_stop               = usbnet_stop,
+	.ndo_start_xmit         = usbnet_start_xmit,
+	.ndo_tx_timeout         = usbnet_tx_timeout,
+	.ndo_change_mtu         = sierra_net_change_mtu,
+	.ndo_set_mac_address    = eth_mac_addr,
+	.ndo_validate_addr      = eth_validate_addr,
+};
+
+/* get private data associated with passed in usbnet device */
+static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
+{
+	return (struct sierra_net_data *)dev->data[0];
+}
+
+/* set private data associated with passed in usbnet device */
+static inline void sierra_net_set_private(struct usbnet *dev,
+			struct sierra_net_data *priv)
+{
+	dev->data[0] = (unsigned long)priv;
+}
+
+/* is packet IPv4 */
+static inline int is_ip(struct sk_buff *skb)
+{
+	return (skb->protocol == cpu_to_be16(ETH_P_IP));
+}
+
+/*
+ * check passed in packet and make sure that:
+ *  - it is linear (no scatter/gather)
+ *  - it is ethernet (mac_header properly set)
+ */
+static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
+{
+	skb_reset_mac_header(skb); /* ethernet header */
+
+	if (skb_is_nonlinear(skb)) {
+		netdev_err(dev->net, "Non linear buffer-dropping\n");
+		return 0;
+	}
+
+	if (!pskb_may_pull(skb, ETH_HLEN))
+		return 0;
+	skb->protocol = eth_hdr(skb)->h_proto;
+
+	return 1;
+}
+
+static const u8 *save16bit(struct param *p, const u8 *datap)
+{
+	p->is_present = 1;
+	p->word = get_unaligned_be16(datap);
+	return datap + sizeof(p->word);
+}
+
+static const u8 *save8bit(struct param *p, const u8 *datap)
+{
+	p->is_present = 1;
+	p->byte = *datap;
+	return datap + sizeof(p->byte);
+}
+
+/*----------------------------------------------------------------------------*
+ *                              BEGIN HIP                                     *
+ *----------------------------------------------------------------------------*/
+/* HIP header */
+#define SIERRA_NET_HIP_HDR_LEN 4
+/* Extended HIP header */
+#define SIERRA_NET_HIP_EXT_HDR_LEN 6
+
+struct hip_hdr {
+	int    hdrlen;
+	struct param payload_len;
+	struct param msgid;
+	struct param msgspecific;
+	struct param extmsgid;
+};
+
+static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
+{
+	const u8 *curp = buf;
+	int    padded;
+
+	if (buflen < SIERRA_NET_HIP_HDR_LEN)
+		return -EPROTO;
+
+	curp = save16bit(&hh->payload_len, curp);
+	curp = save8bit(&hh->msgid, curp);
+	curp = save8bit(&hh->msgspecific, curp);
+
+	padded = hh->msgid.byte & 0x80;
+	hh->msgid.byte &= 0x7F;			/* 7 bits */
+
+	hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
+	if (hh->extmsgid.is_present) {
+		if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
+			return -EPROTO;
+
+		hh->payload_len.word &= 0x3FFF; /* 14 bits */
+
+		curp = save16bit(&hh->extmsgid, curp);
+		hh->extmsgid.word &= 0x03FF;	/* 10 bits */
+
+		hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
+	} else {
+		hh->payload_len.word &= 0x07FF;	/* 11 bits */
+		hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
+	}
+
+	if (padded) {
+		hh->hdrlen++;
+		hh->payload_len.word--;
+	}
+
+	/* if real packet shorter than the claimed length */
+	if (buflen < (hh->hdrlen + hh->payload_len.word))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void build_hip(u8 *buf, const u16 payloadlen,
+		struct sierra_net_data *priv)
+{
+	/* the following doesn't have the full functionality. We
+	 * currently build only one kind of header, so it is faster this way
+	 */
+	put_unaligned_be16(payloadlen, buf);
+	memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
+}
+/*----------------------------------------------------------------------------*
+ *                              END HIP                                       *
+ *----------------------------------------------------------------------------*/
+
+static int sierra_net_send_cmd(struct usbnet *dev,
+		u8 *cmd, int cmdlen, const char * cmd_name)
+{
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+	int  status;
+
+	status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+			USB_CDC_SEND_ENCAPSULATED_COMMAND,
+			USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,	0,
+			priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+
+	if (status != cmdlen && status != -ENODEV)
+		netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
+
+	return status;
+}
+
+static int sierra_net_send_sync(struct usbnet *dev)
+{
+	int  status;
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	status = sierra_net_send_cmd(dev, priv->sync_msg,
+			sizeof(priv->sync_msg), "SYNC");
+
+	return status;
+}
+
+static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
+{
+	dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
+	priv->tx_hdr_template[0] = 0x3F;
+	priv->tx_hdr_template[1] = ctx_ix;
+	*((u16 *)&priv->tx_hdr_template[2]) =
+		cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
+}
+
+static inline int sierra_net_is_valid_addrlen(u8 len)
+{
+	return (len == sizeof(struct in_addr));
+}
+
+static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
+{
+	struct lsi_umts *lsi = (struct lsi_umts *)data;
+
+	if (datalen < sizeof(struct lsi_umts)) {
+		netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
+				__func__, datalen,
+				sizeof(struct lsi_umts));
+		return -1;
+	}
+
+	if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
+		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+				__func__, be16_to_cpu(lsi->length),
+				(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
+		return -1;
+	}
+
+	/* Validate the protocol  - only support UMTS for now */
+	if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+			lsi->protocol);
+		return -1;
+	}
+
+	/* Validate the link type */
+	if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
+		netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+			lsi->link_type);
+		return -1;
+	}
+
+	/* Validate the coverage */
+	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
+	   || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
+		return 0;
+	}
+
+	/* Validate the session state */
+	if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+		netdev_err(dev->net, "Session idle, 0x%02x\n",
+			lsi->session_state);
+		return 0;
+	}
+
+	/* Set link_sense true */
+	return 1;
+}
+
+static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
+		struct hip_hdr	*hh)
+{
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+	int link_up;
+
+	link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
+					hh->payload_len.word);
+	if (link_up < 0) {
+		netdev_err(dev->net, "Invalid LSI\n");
+		return;
+	}
+	if (link_up) {
+		sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
+		priv->link_up = 1;
+		netif_carrier_on(dev->net);
+	} else {
+		priv->link_up = 0;
+		netif_carrier_off(dev->net);
+	}
+}
+
+static void sierra_net_dosync(struct usbnet *dev)
+{
+	int status;
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	/* tell modem we are ready */
+	status = sierra_net_send_sync(dev);
+	if (status < 0)
+		netdev_err(dev->net,
+			"Send SYNC failed, status %d\n", status);
+	status = sierra_net_send_sync(dev);
+	if (status < 0)
+		netdev_err(dev->net,
+			"Send SYNC failed, status %d\n", status);
+
+	/* Now, start a timer and make sure we get the Restart Indication */
+	priv->sync_timer.function = sierra_sync_timer;
+	priv->sync_timer.data = (unsigned long) dev;
+	priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
+	add_timer(&priv->sync_timer);
+}
+
+static void sierra_net_kevent(struct work_struct *work)
+{
+	struct sierra_net_data *priv =
+		container_of(work, struct sierra_net_data, sierra_net_kevent);
+	struct usbnet *dev = priv->usbnet;
+	int  len;
+	int  err;
+	u8  *buf;
+	u8   ifnum;
+
+	if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
+		clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
+
+		/* Query the modem for the LSI message */
+		buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
+		if (!buf) {
+			netdev_err(dev->net,
+				"failed to allocate buf for LS msg\n");
+			return;
+		}
+		ifnum = priv->ifnum;
+		len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+				USB_CDC_GET_ENCAPSULATED_RESPONSE,
+				USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+				0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
+				USB_CTRL_SET_TIMEOUT);
+
+		if (len < 0) {
+			netdev_err(dev->net,
+				"usb_control_msg failed, status %d\n", len);
+		} else {
+			struct hip_hdr	hh;
+
+			dev_dbg(&dev->udev->dev, "%s: Received status message,"
+				" %04x bytes", __func__, len);
+
+			err = parse_hip(buf, len, &hh);
+			if (err) {
+				netdev_err(dev->net, "%s: Bad packet,"
+					" parse result %d\n", __func__, err);
+				kfree(buf);
+				return;
+			}
+
+			/* Validate packet length */
+			if (len != hh.hdrlen + hh.payload_len.word) {
+				netdev_err(dev->net, "%s: Bad packet, received"
+					" %d, expected %d\n",	__func__, len,
+					hh.hdrlen + hh.payload_len.word);
+				kfree(buf);
+				return;
+			}
+
+			/* Switch on received message types */
+			switch (hh.msgid.byte) {
+			case SIERRA_NET_HIP_LSI_UMTSID:
+				dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
+					hh.msgspecific.byte);
+				sierra_net_handle_lsi(dev, buf, &hh);
+				break;
+			case SIERRA_NET_HIP_RESTART_ID:
+				dev_dbg(&dev->udev->dev, "Restart reported: %d,"
+						" stopping sync timer",
+						hh.msgspecific.byte);
+				/* Got sync resp - stop timer & clear mask */
+				del_timer_sync(&priv->sync_timer);
+				clear_bit(SIERRA_NET_TIMER_EXPIRY,
+					  &priv->kevent_flags);
+				break;
+			case SIERRA_NET_HIP_HSYNC_ID:
+				dev_dbg(&dev->udev->dev, "SYNC received");
+				err = sierra_net_send_sync(dev);
+				if (err < 0)
+					netdev_err(dev->net,
+						"Send SYNC failed %d\n", err);
+				break;
+			case SIERRA_NET_HIP_EXTENDEDID:
+				netdev_err(dev->net, "Unrecognized HIP msg, "
+					"extmsgid 0x%04x\n", hh.extmsgid.word);
+				break;
+			case SIERRA_NET_HIP_RCGI:
+				/* Ignored */
+				break;
+			default:
+				netdev_err(dev->net, "Unrecognized HIP msg, "
+					"msgid 0x%02x\n", hh.msgid.byte);
+				break;
+			}
+		}
+		kfree(buf);
+	}
+	/* The sync timer bit might be set */
+	if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
+		clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
+		dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
+		sierra_net_dosync(priv->usbnet);
+	}
+
+	if (priv->kevent_flags)
+		dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
+			"kevent_flags = 0x%lx", priv->kevent_flags);
+}
+
+static void sierra_net_defer_kevent(struct usbnet *dev, int work)
+{
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+	set_bit(work, &priv->kevent_flags);
+	schedule_work(&priv->sierra_net_kevent);
+}
+
+/*
+ * Sync Retransmit Timer Handler. On expiry, kick the work queue
+ */
+void sierra_sync_timer(unsigned long syncdata)
+{
+	struct usbnet *dev = (struct usbnet *)syncdata;
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+	/* Kick the tasklet */
+	sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
+}
+
+static void sierra_net_status(struct usbnet *dev, struct urb *urb)
+{
+	struct usb_cdc_notification *event;
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	if (urb->actual_length < sizeof *event)
+		return;
+
+	/* Add cases to handle other standard notifications. */
+	event = urb->transfer_buffer;
+	switch (event->bNotificationType) {
+	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+	case USB_CDC_NOTIFY_SPEED_CHANGE:
+		/* USB 305 sends those */
+		break;
+	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+		sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
+		break;
+	default:
+		netdev_err(dev->net, ": unexpected notification %02x!\n",
+				event->bNotificationType);
+		break;
+	}
+}
+
+static void sierra_net_get_drvinfo(struct net_device *net,
+		struct ethtool_drvinfo *info)
+{
+	/* Inherit standard device info */
+	usbnet_get_drvinfo(net, info);
+	strncpy(info->driver, driver_name, sizeof info->driver);
+	strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+}
+
+static u32 sierra_net_get_link(struct net_device *net)
+{
+	struct usbnet *dev = netdev_priv(net);
+	/* Report link is down whenever the interface is down */
+	return sierra_net_get_private(dev)->link_up && netif_running(net);
+}
+
+static struct ethtool_ops sierra_net_ethtool_ops = {
+	.get_drvinfo = sierra_net_get_drvinfo,
+	.get_link = sierra_net_get_link,
+	.get_msglevel = usbnet_get_msglevel,
+	.set_msglevel = usbnet_set_msglevel,
+	.get_settings = usbnet_get_settings,
+	.set_settings = usbnet_set_settings,
+	.nway_reset = usbnet_nway_reset,
+};
+
+/* MTU cannot be more than 1500 bytes, enforce it. */
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
+{
+	if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
+		return -EINVAL;
+
+	return usbnet_change_mtu(net, new_mtu);
+}
+
+static int is_whitelisted(const u8 ifnum,
+			const struct sierra_net_iface_info *whitelist)
+{
+	if (whitelist) {
+		const u8 *list = whitelist->ifaceinfo;
+		int i;
+
+		for (i = 0; i < whitelist->infolen; i++) {
+			if (list[i] == ifnum)
+				return 1;
+		}
+	}
+	return 0;
+}
+
+static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+{
+	int result = 0;
+	u16 *attrdata;
+
+	attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
+	if (!attrdata)
+		return -ENOMEM;
+
+	result = usb_control_msg(
+			dev->udev,
+			usb_rcvctrlpipe(dev->udev, 0),
+			/* _u8 vendor specific request */
+			SWI_USB_REQUEST_GET_FW_ATTR,
+			USB_DIR_IN | USB_TYPE_VENDOR,	/* __u8 request type */
+			0x0000,		/* __u16 value not used */
+			0x0000,		/* __u16 index  not used */
+			attrdata,	/* char *data */
+			sizeof(*attrdata),		/* __u16 size */
+			USB_CTRL_SET_TIMEOUT);	/* int timeout */
+
+	if (result < 0) {
+		kfree(attrdata);
+		return -EIO;
+	}
+
+	*datap = *attrdata;
+
+	kfree(attrdata);
+	return result;
+}
+
+/*
+ * Collects the bulk endpoints and the status endpoint.
+ */
+static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	u8	ifacenum;
+	u8	numendpoints;
+	u16	fwattr = 0;
+	int	status;
+	struct ethhdr *eth;
+	struct sierra_net_data *priv;
+	static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
+		0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+	static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
+		0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+
+	struct sierra_net_info_data *data =
+			(struct sierra_net_info_data *)dev->driver_info->data;
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
+	/* We only accept certain interfaces */
+	if (!is_whitelisted(ifacenum, &data->whitelist)) {
+		dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
+		return -ENODEV;
+	}
+	numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
+	/* We have three endpoints, bulk in and out, and a status */
+	if (numendpoints != 3) {
+		dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
+			numendpoints);
+		return -ENODEV;
+	}
+	/* Status endpoint set in usbnet_get_endpoints() */
+	dev->status = NULL;
+	status = usbnet_get_endpoints(dev, intf);
+	if (status < 0) {
+		dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
+			status);
+		return -ENODEV;
+	}
+	/* Initialize sierra private data */
+	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+	if (!priv) {
+		dev_err(&dev->udev->dev, "No memory");
+		return -ENOMEM;
+	}
+
+	priv->usbnet = dev;
+	priv->ifnum = ifacenum;
+	dev->net->netdev_ops = &sierra_net_device_ops;
+
+	/* change MAC addr to include ifacenum, and to be unique */
+	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+	/* we will have to manufacture ethernet headers, prepare template */
+	eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
+	memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
+	eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+	/* prepare shutdown message template */
+	memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+	/* set context index initially to 0 - prepares tx hdr template */
+	sierra_net_set_ctx_index(priv, 0);
+
+	/* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
+	dev->rx_urb_size  = data->rx_urb_size;
+	if (dev->udev->speed != USB_SPEED_HIGH)
+		dev->rx_urb_size  = min_t(size_t, 4096, data->rx_urb_size);
+
+	dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+	/* Set up the netdev */
+	dev->net->flags |= IFF_NOARP;
+	dev->net->ethtool_ops = &sierra_net_ethtool_ops;
+	netif_carrier_off(dev->net);
+
+	sierra_net_set_private(dev, priv);
+
+	priv->kevent_flags = 0;
+
+	/* Use the shared workqueue */
+	INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
+
+	/* Only need to do this once */
+	init_timer(&priv->sync_timer);
+
+	/* verify fw attributes */
+	status = sierra_net_get_fw_attr(dev, &fwattr);
+	dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
+
+	/* test whether firmware supports DHCP */
+	if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
+		/* found incompatible firmware version */
+		dev_err(&dev->udev->dev, "Incompatible driver and firmware"
+			" versions\n");
+		kfree(priv);
+		return -ENODEV;
+	}
+	/* prepare sync message from template */
+	memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
+	/* initiate the sync sequence */
+	sierra_net_dosync(dev);
+
+	return 0;
+}
+
+static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+	int status;
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	/* Kill the timer then flush the work queue */
+	del_timer_sync(&priv->sync_timer);
+
+	flush_scheduled_work();
+
+	/* tell modem we are going away */
+	status = sierra_net_send_cmd(dev, priv->shdwn_msg,
+			sizeof(priv->shdwn_msg), "Shutdown");
+	if (status < 0)
+		netdev_err(dev->net,
+			"usb_control_msg failed, status %d\n", status);
+
+	sierra_net_set_private(dev, NULL);
+
+	kfree(priv);
+}
+
+static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
+		struct sk_buff *skb, int len)
+{
+	struct sk_buff *new_skb;
+
+	/* clone skb */
+	new_skb = skb_clone(skb, GFP_ATOMIC);
+
+	/* remove len bytes from original */
+	skb_pull(skb, len);
+
+	/* trim next packet to its length */
+	if (new_skb) {
+		skb_trim(new_skb, len);
+	} else {
+		if (netif_msg_rx_err(dev))
+			netdev_err(dev->net, "failed to get skb\n");
+		dev->net->stats.rx_dropped++;
+	}
+
+	return new_skb;
+}
+
+/* ---------------------------- Receive data path ----------------------*/
+static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	int err;
+	struct hip_hdr  hh;
+	struct sk_buff *new_skb;
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+
+	/* could contain multiple packets */
+	while (likely(skb->len)) {
+		err = parse_hip(skb->data, skb->len, &hh);
+		if (err) {
+			if (netif_msg_rx_err(dev))
+				netdev_err(dev->net, "Invalid HIP header %d\n",
+					err);
+			/* dev->net->stats.rx_errors incremented by caller */
+			dev->net->stats.rx_length_errors++;
+			return 0;
+		}
+
+		/* Validate Extended HIP header */
+		if (!hh.extmsgid.is_present
+		    || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
+			if (netif_msg_rx_err(dev))
+				netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
+
+			dev->net->stats.rx_frame_errors++;
+			/* dev->net->stats.rx_errors incremented by caller */
+			return 0;
+		}
+
+		skb_pull(skb, hh.hdrlen);
+
+		/* We are going to accept this packet, prepare it */
+		memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
+			ETH_HLEN);
+
+		/* Last packet in batch handled by usbnet */
+		if (hh.payload_len.word == skb->len)
+			return 1;
+
+		new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
+		if (new_skb)
+			usbnet_skb_return(dev, new_skb);
+
+	} /* while */
+
+	return 0;
+}
+
+/* ---------------------------- Transmit data path ----------------------*/
+struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+		gfp_t flags)
+{
+	struct sierra_net_data *priv = sierra_net_get_private(dev);
+	u16 len;
+	bool need_tail;
+
+	dev_dbg(&dev->udev->dev, "%s", __func__);
+	if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
+		/* enough head room as is? */
+		if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
+			/* Save the Eth/IP length and set up HIP hdr */
+			len = skb->len;
+			skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
+			/* Handle ZLP issue */
+			need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
+				% dev->maxpacket == 0);
+			if (need_tail) {
+				if (unlikely(skb_tailroom(skb) == 0)) {
+					netdev_err(dev->net, "tx_fixup:"
+						"no room for packet\n");
+					dev_kfree_skb_any(skb);
+					return NULL;
+				} else {
+					skb->data[skb->len] = 0;
+					__skb_put(skb, 1);
+					len = len + 1;
+				}
+			}
+			build_hip(skb->data, len, priv);
+			return skb;
+		} else {
+			/*
+			 * compensate in the future if necessary
+			 */
+			netdev_err(dev->net, "tx_fixup: no room for HIP\n");
+		} /* headroom */
+	}
+
+	if (!priv->link_up)
+		dev->net->stats.tx_carrier_errors++;
+
+	/* tx_dropped incremented by usbnet */
+
+	/* filter the packet out, release it  */
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
+static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+	.rx_urb_size = 8 * 1024,
+	.whitelist = {
+		.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+		.ifaceinfo = sierra_net_ifnum_list
+	}
+};
+
+static const struct driver_info sierra_net_info_68A3 = {
+	.description = "Sierra Wireless USB-to-WWAN Modem",
+	.flags = FLAG_WWAN | FLAG_SEND_ZLP,
+	.bind = sierra_net_bind,
+	.unbind = sierra_net_unbind,
+	.status = sierra_net_status,
+	.rx_fixup = sierra_net_rx_fixup,
+	.tx_fixup = sierra_net_tx_fixup,
+	.data = (unsigned long)&sierra_net_info_data_68A3,
+};
+
+static const struct usb_device_id products[] = {
+	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+	.driver_info = (unsigned long) &sierra_net_info_68A3},
+
+	{}, /* last item */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+/* We are based on usbnet, so let it handle the USB driver specifics */
+static struct usb_driver sierra_net_driver = {
+	.name = "sierra_net",
+	.id_table = products,
+	.probe = usbnet_probe,
+	.disconnect = usbnet_disconnect,
+	.suspend = usbnet_suspend,
+	.resume = usbnet_resume,
+	.no_dynamic_id = 1,
+};
+
+static int __init sierra_net_init(void)
+{
+	BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+				< sizeof(struct cdc_state));
+
+	return usb_register(&sierra_net_driver);
+}
+
+static void __exit sierra_net_exit(void)
+{
+	usb_deregister(&sierra_net_driver);
+}
+
+module_exit(sierra_net_exit);
+module_init(sierra_net_init);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
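
The need_tail test in sierra_net_tx_fixup() above encodes the usual USB ZLP workaround: if the HIP header plus payload would fill the last bulk packet exactly, one pad byte is appended so the transfer does not end on a packet boundary. Below is a standalone sketch of just that length rule; the header length and max packet size are illustrative values, not the driver's constants.

#include <stdbool.h>
#include <stdio.h>

#define HIP_EXT_HDR_LEN	8	/* illustrative header size, not the driver's constant */

static bool needs_pad_byte(unsigned int payload_len, unsigned int maxpacket)
{
	/* an exact multiple would otherwise end the transfer on a packet boundary */
	return (payload_len + HIP_EXT_HDR_LEN) % maxpacket == 0;
}

int main(void)
{
	unsigned int maxpacket = 512;	/* typical high-speed bulk endpoint */
	unsigned int lens[] = { 100, 504, 1016, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("payload %4u -> %s\n", lens[i],
		       needs_pad_byte(lens[i], maxpacket) ?
		       "append one pad byte" : "send as is");
	return 0;
}
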
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 679da7e..ca42ccb 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -246,7 +246,7 @@
 	u32 idx, i;
 
 	i = (*index) % ring_limit;
-	(*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
+	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
 	idx %= ring_limit;
 
 	while (i != idx) {
diff --git a/include/linux/net.h b/include/linux/net.h
index 4157b5d..2b4deee 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -59,6 +59,7 @@
 #include <linux/wait.h>
 #include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
+#include <linux/rcupdate.h>
 
 struct poll_table_struct;
 struct pipe_inode_info;
@@ -116,6 +117,12 @@
 	SHUT_RDWR	= 2,
 };
 
+struct socket_wq {
+	wait_queue_head_t	wait;
+	struct fasync_struct	*fasync_list;
+	struct rcu_head		rcu;
+} ____cacheline_aligned_in_smp;
+
 /**
  *  struct socket - general BSD socket
  *  @state: socket state (%SS_CONNECTED, etc)
@@ -135,11 +142,8 @@
 	kmemcheck_bitfield_end(type);
 
 	unsigned long		flags;
-	/*
-	 * Please keep fasync_list & wait fields in the same cache line
-	 */
-	struct fasync_struct	*fasync_list;
-	wait_queue_head_t	wait;
+
+	struct socket_wq	*wq;
 
 	struct file		*file;
 	struct sock		*sk;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40d4c20..98112fbd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -218,16 +218,6 @@
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats {
-	unsigned total;
-	unsigned dropped;
-	unsigned time_squeeze;
-	unsigned cpu_collision;
-	unsigned received_rps;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
 struct netdev_hw_addr {
 	struct list_head	list;
 	unsigned char		addr[MAX_ADDR_LEN];
@@ -888,7 +878,7 @@
 	unsigned char		operstate; /* RFC2863 operstate */
 	unsigned char		link_mode; /* mapping policy to operstate */
 
-	unsigned		mtu;	/* interface MTU value		*/
+	unsigned int		mtu;	/* interface MTU value		*/
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
@@ -1390,6 +1380,12 @@
 	struct sk_buff		*completion_queue;
 	struct sk_buff_head	process_queue;
 
+	/* stats */
+	unsigned int		processed;
+	unsigned int		time_squeeze;
+	unsigned int		cpu_collision;
+	unsigned int		received_rps;
+
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 
@@ -1399,6 +1395,7 @@
 	unsigned int		cpu;
 	unsigned int		input_queue_head;
 #endif
+	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 004908b..4ec3b38 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -429,6 +429,23 @@
 		pos = rcu_dereference_raw(pos->next))
 
 /**
+ * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member)		 \
+	for (pos = rcu_dereference_bh((head)->first);			 \
+		pos && ({ prefetch(pos->next); 1; }) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+		pos = rcu_dereference_bh(pos->next))
+
+/**
  * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
  * @tpos:	the type * to use as a loop cursor.
  * @pos:	the &struct hlist_node to use as a loop cursor.
@@ -440,6 +457,18 @@
 	     ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });  \
 	     pos = rcu_dereference(pos->next))
 
+/**
+ * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member)		\
+	for (pos = rcu_dereference_bh((pos)->next);			\
+	     pos && ({ prefetch(pos->next); 1; }) &&			\
+	     ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });  \
+	     pos = rcu_dereference_bh(pos->next))
+
 
 #endif	/* __KERNEL__ */
 #endif
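
A kernel-context usage sketch for the new _bh iterator; struct foo, foo_table and foo_lookup are hypothetical. The only difference from hlist_for_each_entry_rcu() is that the read side is expected to hold rcu_read_lock_bh(), which is how the ipv6 address hash converted later in this patch uses it.

#include <linux/rculist.h>

struct foo {				/* hypothetical entry type */
	unsigned int key;
	struct hlist_node node;
};

static struct hlist_head foo_table[16];	/* hypothetical hash table */

static struct foo *foo_lookup(unsigned int key)
{
	struct foo *f, *ret = NULL;
	struct hlist_node *pos;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(f, pos, &foo_table[key & 15], node) {
		if (f->key == key) {
			ret = f;
			break;
		}
	}
	rcu_read_unlock_bh();
	return ret;
}
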
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 82f5116..746a652 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1128,6 +1128,11 @@
 	return skb->data += len;
 }
 
+static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+{
+	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
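
A hypothetical caller sketch for skb_pull_inline(): like skb_pull(), it refuses to pull past the end of the data and returns NULL, but being inline it avoids the function call on hot paths such as eth_type_trans() later in this patch. myproto_hdr and myproto_strip_header are illustrative names only.

#include <linux/skbuff.h>

struct myproto_hdr {			/* hypothetical protocol header */
	__be16 type;
	__be16 len;
};

static int myproto_strip_header(struct sk_buff *skb)
{
	/* returns NULL, leaving the skb untouched, if the frame is runt */
	if (!skb_pull_inline(skb, sizeof(struct myproto_hdr)))
		return -EINVAL;
	return 0;
}
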
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 1614d78..20725e2 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -30,7 +30,7 @@
 #endif
 };
 
-#define UNIXCB(skb) 	(*(struct unix_skb_parms*)&((skb)->cb))
+#define UNIXCB(skb) 	(*(struct unix_skb_parms *)&((skb)->cb))
 #define UNIXCREDS(skb)	(&UNIXCB((skb)).creds)
 #define UNIXSID(skb)	(&UNIXCB((skb)).secid)
 
@@ -45,21 +45,23 @@
 struct unix_sock {
 	/* WARNING: sk has to be the first member */
 	struct sock		sk;
-        struct unix_address     *addr;
-        struct dentry		*dentry;
-        struct vfsmount		*mnt;
+	struct unix_address     *addr;
+	struct dentry		*dentry;
+	struct vfsmount		*mnt;
 	struct mutex		readlock;
-        struct sock		*peer;
-        struct sock		*other;
+	struct sock		*peer;
+	struct sock		*other;
 	struct list_head	link;
-        atomic_long_t           inflight;
-        spinlock_t		lock;
+	atomic_long_t		inflight;
+	spinlock_t		lock;
 	unsigned int		gc_candidate : 1;
 	unsigned int		gc_maybe_cycle : 1;
-        wait_queue_head_t       peer_wait;
+	struct socket_wq	peer_wq;
 };
 #define unix_sk(__sk) ((struct unix_sock *)__sk)
 
+#define peer_wait peer_wq.wait
+
 #ifdef CONFIG_SYSCTL
 extern int unix_sysctl_register(struct net *net);
 extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 8be5135..2c55a7e 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@
 	SCTP_CMD_T1_RETRAN,	 /* Mark for retransmission after T1 timeout  */
 	SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
 	SCTP_CMD_SEND_MSG,	 /* Send the whole use message */
+	SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
 	SCTP_CMD_LAST
 } sctp_verb_t;
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 289241d..65946bc 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -128,6 +128,7 @@
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
+void sctp_data_ready(struct sock *sk, int len);
 unsigned int sctp_poll(struct file *file, struct socket *sock,
 		poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9d44aef..43257b9 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,6 +775,7 @@
 			  struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void  *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				 const struct sctp_association *,
 				 struct sock *);
diff --git a/include/net/sock.h b/include/net/sock.h
index e1777db..328e03f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -74,7 +74,7 @@
 					printk(KERN_DEBUG msg); } while (0)
 #else
 /* Validate arguments and do nothing */
-static void inline int __attribute__ ((format (printf, 2, 3)))
+static inline void __attribute__ ((format (printf, 2, 3)))
 SOCK_DEBUG(struct sock *sk, const char *msg, ...)
 {
 }
@@ -159,7 +159,7 @@
   *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
   *	@sk_lock:	synchronizer
   *	@sk_rcvbuf: size of receive buffer in bytes
-  *	@sk_sleep: sock wait queue
+  *	@sk_wq: sock wait queue and async head
   *	@sk_dst_cache: destination cache
   *	@sk_dst_lock: destination cache lock
   *	@sk_policy: flow policy
@@ -257,7 +257,7 @@
 		struct sk_buff *tail;
 		int len;
 	} sk_backlog;
-	wait_queue_head_t	*sk_sleep;
+	struct socket_wq	*sk_wq;
 	struct dst_entry	*sk_dst_cache;
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
@@ -1219,7 +1219,7 @@
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
-	return sk->sk_sleep;
+	return &sk->sk_wq->wait;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1233,14 +1233,14 @@
 	write_lock_bh(&sk->sk_callback_lock);
 	sock_set_flag(sk, SOCK_DEAD);
 	sk_set_socket(sk, NULL);
-	sk->sk_sleep  = NULL;
+	sk->sk_wq  = NULL;
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
 	write_lock_bh(&sk->sk_callback_lock);
-	sk->sk_sleep = &parent->wait;
+	rcu_assign_pointer(sk->sk_wq, parent->wq);
 	parent->sk = sk;
 	sk_set_socket(sk, parent);
 	security_sock_graft(sk, parent);
@@ -1392,12 +1392,12 @@
 }
 
 /**
- * sk_has_sleeper - check if there are any waiting processes
- * @sk: socket
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: struct socket_wq
  *
- * Returns true if socket has waiting processes
+ * Returns true if socket_wq has waiting processes
  *
- * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
  * barrier call. They were added due to the race found within the tcp code.
  *
  * Consider following tcp code paths:
@@ -1410,9 +1410,10 @@
  *   ...                 ...
  *   tp->rcv_nxt check   sock_def_readable
  *   ...                 {
- *   schedule               ...
- *                          if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- *                              wake_up_interruptible(sk_sleep(sk))
+ *   schedule               rcu_read_lock();
+ *                          wq = rcu_dereference(sk->sk_wq);
+ *                          if (wq && waitqueue_active(&wq->wait))
+ *                              wake_up_interruptible(&wq->wait)
  *                          ...
  *                       }
  *
@@ -1421,19 +1422,18 @@
  * could then endup calling schedule and sleep forever if there are no more
  * data on the socket.
  *
- * The sk_has_sleeper is always called right after a call to read_lock, so we
- * can use smp_mb__after_lock barrier.
  */
-static inline int sk_has_sleeper(struct sock *sk)
+static inline bool wq_has_sleeper(struct socket_wq *wq)
 {
+
 	/*
 	 * We need to be sure we are in sync with the
 	 * add_wait_queue modifications to the wait queue.
 	 *
 	 * This memory barrier is paired in the sock_poll_wait.
 	 */
-	smp_mb__after_lock();
-	return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
+	smp_mb();
+	return wq && waitqueue_active(&wq->wait);
 }
 
 /**
@@ -1442,7 +1442,7 @@
  * @wait_address:   socket wait queue
  * @p:              poll_table
  *
- * See the comments in the sk_has_sleeper function.
+ * See the comments in the wq_has_sleeper function.
  */
 static inline void sock_poll_wait(struct file *filp,
 		wait_queue_head_t *wait_address, poll_table *p)
@@ -1453,7 +1453,7 @@
 		 * We need to be sure we are in sync with the
 		 * socket flags modification.
 		 *
-		 * This memory barrier is paired in the sk_has_sleeper.
+		 * This memory barrier is paired in the wq_has_sleeper.
 		*/
 		smp_mb();
 	}
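
The comment above describes the race in prose; the sketch below shows how the two halves are meant to pair up after this change. my_poll and my_data_ready are illustrative names modeled on sock_def_readable() and typical poll handlers, not code from this patch. sock_poll_wait() registers the waiter and issues smp_mb() before the state is checked; wq_has_sleeper() issues smp_mb() after the state was updated and before waitqueue_active() is tested, so at least one side always observes the other.

#include <linux/poll.h>
#include <net/sock.h>

static unsigned int my_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);	/* add waiter, smp_mb() */
	if (!skb_queue_empty(&sk->sk_receive_queue))	/* then check state */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static void my_data_ready(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))		/* smp_mb(), then test for waiters */
		wake_up_interruptible_poll(&wq->wait, POLLIN | POLLRDNORM);
	rcu_read_unlock();
}
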
diff --git a/net/atm/common.c b/net/atm/common.c
index e3e10e6..b43feb1 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@
 
 static void vcc_def_wakeup(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up(sk_sleep(sk));
-	read_unlock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up(&wq->wait);
+	rcu_read_unlock();
 }
 
 static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@
 
 static void vcc_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
 
 	if (vcc_writable(sk)) {
-		if (sk_has_sleeper(sk))
-			wake_up_interruptible(sk_sleep(sk));
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible(&wq->wait);
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 static struct proto vcc_proto = {
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c1e60ee..864c76f 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1626,7 +1626,10 @@
 	/* Connectionless channel */
 	if (sk->sk_type == SOCK_DGRAM) {
 		skb = l2cap_create_connless_pdu(sk, msg, len);
-		err = l2cap_do_send(sk, skb);
+		if (IS_ERR(skb))
+			err = PTR_ERR(skb);
+		else
+			err = l2cap_do_send(sk, skb);
 		goto done;
 	}
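
The change above follows the standard ERR_PTR convention: a constructor that returns a pointer encodes its errno in the pointer itself, and callers must test with IS_ERR() before using or queueing the result. A minimal kernel-style sketch with hypothetical helper names:

#include <linux/err.h>
#include <linux/skbuff.h>

/* hypothetical constructor: on failure the errno travels in the pointer */
static struct sk_buff *build_pdu(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);
	return skb;
}

static int send_pdu(unsigned int len)
{
	struct sk_buff *skb = build_pdu(len);

	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* recover the errno; nothing to free */

	/* a real caller would hand the skb to its transmit path here */
	kfree_skb(skb);
	return 0;
}
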
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 100dcbd..36d53be 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2205,8 +2205,6 @@
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
 
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
@@ -2366,7 +2364,7 @@
 	struct softnet_data *sd = data;
 
 	__napi_schedule(&sd->backlog);
-	__get_cpu_var(netdev_rx_stat).received_rps++;
+	sd->received_rps++;
 }
 
 #endif /* CONFIG_RPS */
@@ -2405,7 +2403,6 @@
 	sd = &per_cpu(softnet_data, cpu);
 
 	local_irq_save(flags);
-	__get_cpu_var(netdev_rx_stat).total++;
 
 	rps_lock(sd);
 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
@@ -2429,9 +2426,9 @@
 		goto enqueue;
 	}
 
+	sd->dropped++;
 	rps_unlock(sd);
 
-	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
 
 	kfree_skb(skb);
@@ -2806,7 +2803,7 @@
 			skb->dev = master;
 	}
 
-	__get_cpu_var(netdev_rx_stat).total++;
+	__get_cpu_var(softnet_data).processed++;
 
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
@@ -3490,7 +3487,7 @@
 	return;
 
 softnet_break:
-	__get_cpu_var(netdev_rx_stat).time_squeeze++;
+	sd->time_squeeze++;
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 	goto out;
 }
@@ -3691,17 +3688,17 @@
 	return 0;
 }
 
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
 {
-	struct netif_rx_stats *rc = NULL;
+	struct softnet_data *sd = NULL;
 
 	while (*pos < nr_cpu_ids)
 		if (cpu_online(*pos)) {
-			rc = &per_cpu(netdev_rx_stat, *pos);
+			sd = &per_cpu(softnet_data, *pos);
 			break;
 		} else
 			++*pos;
-	return rc;
+	return sd;
 }
 
 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3721,12 +3718,12 @@
 
 static int softnet_seq_show(struct seq_file *seq, void *v)
 {
-	struct netif_rx_stats *s = v;
+	struct softnet_data *sd = v;
 
 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		   s->total, s->dropped, s->time_squeeze, 0,
+		   sd->processed, sd->dropped, sd->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
-		   s->cpu_collision, s->received_rps);
+		   sd->cpu_collision, sd->received_rps);
 	return 0;
 }
 
@@ -5869,6 +5866,7 @@
 	for_each_possible_cpu(i) {
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
+		memset(sd, 0, sizeof(*sd));
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 		sd->completion_queue = NULL;
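
With netif_rx_stats gone, /proc/net/softnet_stat is still emitted by softnet_seq_show() above as ten hex fields per online CPU, now sourced from softnet_data: processed, dropped, time_squeeze, five always-zero placeholders, cpu_collision and received_rps. A small userspace sketch that parses that layout, assuming the format shown in the hunk above:

#include <stdio.h>

int main(void)
{
	unsigned int v[10];
	char line[256];
	FILE *f = fopen("/proc/net/softnet_stat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%x %x %x %x %x %x %x %x %x %x",
			   &v[0], &v[1], &v[2], &v[3], &v[4],
			   &v[5], &v[6], &v[7], &v[8], &v[9]) == 10)
			printf("processed=%u dropped=%u time_squeeze=%u "
			       "cpu_collision=%u received_rps=%u\n",
			       v[0], v[1], v[2], v[8], v[9]);
	}
	fclose(f);
	return 0;
}
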
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4218ff4..8b9c109 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1051,7 +1051,7 @@
  */
 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
 {
-	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+	return skb_pull_inline(skb, len);
 }
 EXPORT_SYMBOL(skb_pull);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 5104175..94c4aff 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1211,7 +1211,7 @@
 		 */
 		sk_refcnt_debug_inc(newsk);
 		sk_set_socket(newsk, NULL);
-		newsk->sk_sleep	 = NULL;
+		newsk->sk_wq = NULL;
 
 		if (newsk->sk_prot->sockets_allocated)
 			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1800,41 +1800,53 @@
 
 static void sock_def_wakeup(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible_all(sk_sleep(sk));
-	read_unlock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_all(&wq->wait);
+	rcu_read_unlock();
 }
 
 static void sock_def_error_report(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible_poll(sk_sleep(sk), POLLERR);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_poll(&wq->wait, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 static void sock_def_readable(struct sock *sk, int len)
 {
-	read_lock(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN |
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 static void sock_def_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
 
 	/* Do not wake up a writer until he can make "significant"
 	 * progress.  --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk_has_sleeper(sk))
-			wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
 		/* Should agree with poll, otherwise some programs break */
@@ -1842,7 +1854,7 @@
 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 static void sock_def_destruct(struct sock *sk)
@@ -1896,10 +1908,10 @@
 
 	if (sock) {
 		sk->sk_type	=	sock->type;
-		sk->sk_sleep	=	&sock->wait;
+		sk->sk_wq	=	sock->wq;
 		sock->sk	=	sk;
 	} else
-		sk->sk_sleep	=	NULL;
+		sk->sk_wq	=	NULL;
 
 	spin_lock_init(&sk->sk_dst_lock);
 	rwlock_init(&sk->sk_callback_lock);
diff --git a/net/core/stream.c b/net/core/stream.c
index 7b3c3f3..cc196f42b 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
 void sk_stream_write_space(struct sock *sk)
 {
 	struct socket *sock = sk->sk_socket;
+	struct socket_wq *wq;
 
 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
 		clear_bit(SOCK_NOSPACE, &sock->flags);
 
-		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-			wake_up_interruptible_poll(sk_sleep(sk), POLLOUT |
+		rcu_read_lock();
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible_poll(&wq->wait, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
-		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
 			sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+		rcu_read_unlock();
 	}
 }
 
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 2d3dcb3..aadbdb5 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -195,15 +195,17 @@
 
 void dccp_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
 
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible(sk_sleep(sk));
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible(&wq->wait);
 	/* Should agree with poll, otherwise some programs break */
 	if (sock_writeable(sk))
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /**
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0c0d272..61ec032 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -162,7 +162,7 @@
 
 	skb->dev = dev;
 	skb_reset_mac_header(skb);
-	skb_pull(skb, ETH_HLEN);
+	skb_pull_inline(skb, ETH_HLEN);
 	eth = eth_hdr(skb);
 
 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 78cbc39..e0a3e35 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -70,17 +70,13 @@
 		    (!sk->sk_bound_dev_if ||
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-			const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-
 			if (!reuse || !sk2->sk_reuse ||
 			    sk2->sk_state == TCP_LISTEN) {
+				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
 				    sk2_rcv_saddr == sk_rcv_saddr)
 					break;
-			} else if (reuse && sk2->sk_reuse &&
-				   sk2_rcv_saddr &&
-				   sk2_rcv_saddr == sk_rcv_saddr)
-				break;
+			}
 		}
 	}
 	return node != NULL;
@@ -124,11 +120,9 @@
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
 						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
-							if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
-								spin_unlock(&head->lock);
-								snum = smallest_rover;
-								goto have_snum;
-							}
+							spin_unlock(&head->lock);
+							snum = smallest_rover;
+							goto have_snum;
 						}
 					}
 					goto next;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 34d2d64..3984f52 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1346,7 +1346,7 @@
 	struct hlist_node *node;
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2959,7 +2959,7 @@
 
 	for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
 		struct hlist_node *n;
-		hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
+		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
 					 addr_lst)
 			if (net_eq(dev_net(ifa->idev->dev), net))
 				return ifa;
@@ -2974,12 +2974,12 @@
 	struct net *net = seq_file_net(seq);
 	struct hlist_node *n = &ifa->addr_lst;
 
-	hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
+	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
 		if (net_eq(dev_net(ifa->idev->dev), net))
 			return ifa;
 
 	while (++state->bucket < IN6_ADDR_HSIZE) {
-		hlist_for_each_entry(ifa, n,
+		hlist_for_each_entry_rcu_bh(ifa, n,
 				     &inet6_addr_lst[state->bucket], addr_lst) {
 			if (net_eq(dev_net(ifa->idev->dev), net))
 				return ifa;
@@ -3000,7 +3000,7 @@
 }
 
 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(rcu)
+	__acquires(rcu_bh)
 {
 	rcu_read_lock_bh();
 	return if6_get_idx(seq, *pos);
@@ -3016,7 +3016,7 @@
 }
 
 static void if6_seq_stop(struct seq_file *seq, void *v)
-	__releases(rcu)
+	__releases(rcu_bh)
 {
 	rcu_read_unlock_bh();
 }
@@ -3093,7 +3093,7 @@
 	unsigned int hash = ipv6_addr_hash(addr);
 
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+	hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3127,7 +3127,7 @@
 
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-		hlist_for_each_entry_rcu(ifp, node,
+		hlist_for_each_entry_rcu_bh(ifp, node,
 					 &inet6_addr_lst[i], addr_lst) {
 			unsigned long age;
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9ca1efc..0c5e3c3 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -42,16 +42,11 @@
 		if (sk != sk2 &&
 		    (!sk->sk_bound_dev_if ||
 		     !sk2->sk_bound_dev_if ||
-		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-			if ((!sk->sk_reuse || !sk2->sk_reuse ||
-			     sk2->sk_state == TCP_LISTEN) &&
-			     ipv6_rcv_saddr_equal(sk, sk2))
-				break;
-			else if (sk->sk_reuse && sk2->sk_reuse &&
-				!ipv6_addr_any(inet6_rcv_saddr(sk)) &&
-				ipv6_rcv_saddr_equal(sk, sk2))
-				break;
-		}
+		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
+		    (!sk->sk_reuse || !sk2->sk_reuse ||
+		     sk2->sk_state == TCP_LISTEN) &&
+		     ipv6_rcv_saddr_equal(sk, sk2))
+			break;
 	}
 
 	return node != NULL;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9636b7d..8be324f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -305,11 +305,14 @@
  */
 static void iucv_sock_wake_msglim(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
-	if (sk_has_sleeper(sk))
-		wake_up_interruptible_all(sk_sleep(sk));
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_all(&wq->wait);
 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /* Timers */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a9576..af4d38b 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@
 		if (signal_pending(tsk))
 			return sock_intr_errno(timeo);
 
-		prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 						TASK_INTERRUPTIBLE);
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
-		finish_wait(&sk->sk_socket->wait, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 	}
 
 	return 0;
@@ -910,10 +910,10 @@
 			goto out;
 		}
 
-		prepare_to_wait(&sk->sk_socket->wait, &wait,
+		prepare_to_wait(sk_sleep(sk), &wait,
 				TASK_INTERRUPTIBLE);
 		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
-		finish_wait(&sk->sk_socket->wait, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 
 		if (sk->sk_state != TCP_ESTABLISHED)
 			goto disabled;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd..6e9848bf 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@
 	struct pep_sock *pn = pep_sk(sk);
 	unsigned int mask = 0;
 
-	poll_wait(file, &sock->wait, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	switch (sk->sk_state) {
 	case TCP_LISTEN:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c432d76..0b9bb20 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@
 static void rxrpc_write_space(struct sock *sk)
 {
 	_enter("%p", sk);
-	read_lock(&sk->sk_callback_lock);
+	rcu_read_lock();
 	if (rxrpc_writable(sk)) {
-		if (sk_has_sleeper(sk))
-			wake_up_interruptible(sk_sleep(sk));
+		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible(&wq->wait);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /*
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aeddabf..a969b11 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@
 		 * Another cpu is holding lock, requeue & delay xmits for
 		 * some time.
 		 */
-		__get_cpu_var(netdev_rx_stat).cpu_collision++;
+		__get_cpu_var(softnet_data).cpu_collision++;
 		ret = dev_requeue_skb(skb, q);
 	}
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 65f9a7c..3912420 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1192,8 +1192,10 @@
 	/* Remove any peer addresses not present in the new association. */
 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
 		trans = list_entry(pos, struct sctp_transport, transports);
-		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
-			sctp_assoc_del_peer(asoc, &trans->ipaddr);
+		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+			sctp_assoc_rm_peer(asoc, trans);
+			continue;
+		}
 
 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
 			sctp_transport_reset(trans);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2f8763b..e10acc0 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -142,6 +142,7 @@
 	/* Use SCTP specific send buffer space queues.  */
 	ep->sndbuf_policy = sctp_sndbuf_policy;
 
+	sk->sk_data_ready = sctp_data_ready;
 	sk->sk_write_space = sctp_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 8f025d5..db3a42b 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -27,6 +27,7 @@
 #include <linux/socket.h>
 #include <linux/sctp.h>
 #include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/kfifo.h>
 #include <linux/time.h>
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 24effdf..d8261f3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -108,7 +108,7 @@
 	cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };
 
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
  * provided chunk, as most cause codes will be embedded inside an
  * abort chunk.
  */
@@ -125,6 +125,29 @@
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
 }
 
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk.  Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+		      size_t paylen)
+{
+	sctp_errhdr_t err;
+	__u16 len;
+
+	/* Cause code constants are now defined in network order.  */
+	err.cause = cause_code;
+	len = sizeof(sctp_errhdr_t) + paylen;
+	err.length  = htons(len);
+
+	if (skb_tailroom(chunk->skb) < len)
+		return -ENOSPC;
+	chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+						     sizeof(sctp_errhdr_t),
+						     &err);
+	return 0;
+}
 /* 3.3.2 Initiation (INIT) (1)
  *
  * This chunk is used to initiate a SCTP association between two
@@ -208,7 +231,8 @@
 	sp = sctp_sk(asoc->base.sk);
 	num_types = sp->pf->supported_addrs(sp, types);
 
-	chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+	chunksize = sizeof(init) + addrs_len;
+	chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
 	chunksize += sizeof(ecap_param);
 
 	if (sctp_prsctp_enable)
@@ -238,14 +262,14 @@
 		/* Add HMACS parameter length if any were defined */
 		auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
 		if (auth_hmacs->length)
-			chunksize += ntohs(auth_hmacs->length);
+			chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
 		else
 			auth_hmacs = NULL;
 
 		/* Add CHUNKS parameter length */
 		auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
 		if (auth_chunks->length)
-			chunksize += ntohs(auth_chunks->length);
+			chunksize += WORD_ROUND(ntohs(auth_chunks->length));
 		else
 			auth_chunks = NULL;
 
@@ -255,7 +279,8 @@
 
 	/* If we have any extensions to report, account for that */
 	if (num_ext)
-		chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+					num_ext);
 
 	/* RFC 2960 3.3.2 Initiation (INIT) (1)
 	 *
@@ -397,13 +422,13 @@
 
 		auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
 		if (auth_hmacs->length)
-			chunksize += ntohs(auth_hmacs->length);
+			chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
 		else
 			auth_hmacs = NULL;
 
 		auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
 		if (auth_chunks->length)
-			chunksize += ntohs(auth_chunks->length);
+			chunksize += WORD_ROUND(ntohs(auth_chunks->length));
 		else
 			auth_chunks = NULL;
 
@@ -412,7 +437,8 @@
 	}
 
 	if (num_ext)
-		chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+					num_ext);
 
 	/* Now allocate and fill out the chunk.  */
 	retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -1124,6 +1150,24 @@
 	return retval;
 }
 
+/* Create an Operation Error chunk of a fixed size, specifically the
+ * association's path MTU, or SCTP_DEFAULT_MAXSEGMENT if that is not
+ * yet known.  This is a helper function to allocate an error chunk
+ * for those invalid parameter codes in which we may not want to
+ * report all the errors, if the incoming chunk is large.
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+	const struct sctp_association *asoc,
+	const struct sctp_chunk *chunk)
+{
+	size_t size = asoc ? asoc->pathmtu : 0;
+
+	if (!size)
+		size = SCTP_DEFAULT_MAXSEGMENT;
+
+	return sctp_make_op_error_space(asoc, chunk, size);
+}
+
 /* Create an Operation Error chunk.  */
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 				 const struct sctp_chunk *chunk,
@@ -1365,6 +1409,18 @@
 	return target;
 }
 
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+			     int len, const void *data)
+{
+	if (skb_tailroom(chunk->skb) >= len)
+		return sctp_addto_chunk(chunk, len, data);
+	else
+		return NULL;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1968,13 +2024,12 @@
 		 * returning multiple unknown parameters.
 		 */
 		if (NULL == *errp)
-			*errp = sctp_make_op_error_space(asoc, chunk,
-					ntohs(chunk->chunk_hdr->length));
+			*errp = sctp_make_op_error_fixed(asoc, chunk);
 
 		if (*errp) {
-			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+			sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
 					WORD_ROUND(ntohs(param.p->length)));
-			sctp_addto_chunk(*errp,
+			sctp_addto_chunk_fixed(*errp,
 					WORD_ROUND(ntohs(param.p->length)),
 					param.v);
 		} else {
@@ -3309,21 +3364,6 @@
 	sctp_chunk_free(asconf);
 	asoc->addip_last_asconf = NULL;
 
-	/* Send the next asconf chunk from the addip chunk queue. */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		asconf = list_entry(entry, struct sctp_chunk, list);
-
-		list_del_init(entry);
-
-		/* Hold the chunk until an ASCONF_ACK is received. */
-		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(asoc, asconf))
-			sctp_chunk_free(asconf);
-		else
-			asoc->addip_last_asconf = asconf;
-	}
-
 	return retval;
 }
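
The WORD_ROUND() additions above matter because every SCTP parameter occupies a multiple of 4 bytes on the wire, so summing unrounded parameter lengths can undersize the INIT/INIT-ACK allocation. A standalone sketch of the arithmetic; the example lengths are arbitrary and WORD_ROUND mirrors the kernel macro:

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3u)	/* round up to a 4-byte boundary */

int main(void)
{
	unsigned int hmacs_len = 6;	/* e.g. param header plus one 16-bit id */
	unsigned int chunks_len = 5;	/* odd length chosen for illustration */
	unsigned int naive = hmacs_len + chunks_len;
	unsigned int padded = WORD_ROUND(hmacs_len) + WORD_ROUND(chunks_len);

	printf("naive=%u bytes, padded=%u bytes\n", naive, padded);
	return 0;
}
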
 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 49fb9ac..3b7230e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -966,6 +966,29 @@
 }
 
 
+/* Send the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+	/* Send the next asconf chunk from the addip chunk
+	 * queue.
+	 */
+	if (!list_empty(&asoc->addip_chunk_list)) {
+		struct list_head *entry = asoc->addip_chunk_list.next;
+		struct sctp_chunk *asconf = list_entry(entry,
+						struct sctp_chunk, list);
+		list_del_init(entry);
+
+		/* Hold the chunk until an ASCONF_ACK is received. */
+		sctp_chunk_hold(asconf);
+		if (sctp_primitive_ASCONF(asoc, asconf))
+			sctp_chunk_free(asconf);
+		else
+			asoc->addip_last_asconf = asconf;
+	}
+}
+
 
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1621,6 +1644,9 @@
 			}
 			error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
 			break;
+		case SCTP_CMD_SEND_NEXT_ASCONF:
+			sctp_cmd_send_asconf(asoc);
+			break;
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index abf601a..24b2cd5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3676,8 +3676,14 @@
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
 		if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-					     asconf_ack))
+					     asconf_ack)) {
+			/* Successfully processed ASCONF_ACK.  We can
+			 * release the next asconf if we have one.
+			 */
+			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+					SCTP_NULL());
 			return SCTP_DISPOSITION_CONSUME;
+		}
 
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1282a0e..ba1add0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3719,9 +3719,9 @@
 	sp->hmac = NULL;
 
 	SCTP_DBG_OBJCNT_INC(sock);
-	percpu_counter_inc(&sctp_sockets_allocated);
 
 	local_bh_disable();
+	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	local_bh_enable();
 
@@ -3738,8 +3738,8 @@
 	/* Release our hold on the endpoint. */
 	ep = sctp_sk(sk)->ep;
 	sctp_endpoint_free(ep);
-	percpu_counter_dec(&sctp_sockets_allocated);
 	local_bh_disable();
+	percpu_counter_dec(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }
@@ -6065,7 +6065,7 @@
 			 * here by modeling from the current TCP/UDP code.
 			 * We have not tested with it yet.
 			 */
-			if (sock->fasync_list &&
+			if (sock->wq->fasync_list &&
 			    !(sk->sk_shutdown & SEND_SHUTDOWN))
 				sock_wake_async(sock,
 						SOCK_WAKE_SPACE, POLL_OUT);
@@ -6185,6 +6185,19 @@
 	goto out;
 }
 
+void sctp_data_ready(struct sock *sk, int len)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+						POLLRDNORM | POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	rcu_read_unlock();
+}
+
 /* If socket sndbuf has changed, wake up all per association waiters.  */
 void sctp_write_space(struct sock *sk)
 {
diff --git a/net/socket.c b/net/socket.c
index cb7c1f6..dae8c6b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@
 	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
-	init_waitqueue_head(&ei->socket.wait);
+	ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
+	if (!ei->socket.wq) {
+		kmem_cache_free(sock_inode_cachep, ei);
+		return NULL;
+	}
+	init_waitqueue_head(&ei->socket.wq->wait);
+	ei->socket.wq->fasync_list = NULL;
 
-	ei->socket.fasync_list = NULL;
 	ei->socket.state = SS_UNCONNECTED;
 	ei->socket.flags = 0;
 	ei->socket.ops = NULL;
@@ -264,10 +269,21 @@
 	return &ei->vfs_inode;
 }
 
+
+static void wq_free_rcu(struct rcu_head *head)
+{
+	struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
+
+	kfree(wq);
+}
+
 static void sock_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(sock_inode_cachep,
-			container_of(inode, struct socket_alloc, vfs_inode));
+	struct socket_alloc *ei;
+
+	ei = container_of(inode, struct socket_alloc, vfs_inode);
+	call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+	kmem_cache_free(sock_inode_cachep, ei);
 }
 
 static void init_once(void *foo)
@@ -513,7 +529,7 @@
 		module_put(owner);
 	}
 
-	if (sock->fasync_list)
+	if (sock->wq->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
 	percpu_sub(sockets_in_use, 1);
@@ -1080,9 +1096,9 @@
 
 	lock_sock(sk);
 
-	fasync_helper(fd, filp, on, &sock->fasync_list);
+	fasync_helper(fd, filp, on, &sock->wq->fasync_list);
 
-	if (!sock->fasync_list)
+	if (!sock->wq->fasync_list)
 		sock_reset_flag(sk, SOCK_FASYNC);
 	else
 		sock_set_flag(sk, SOCK_FASYNC);
@@ -1091,12 +1107,20 @@
 	return 0;
 }
 
-/* This function may be called only under socket lock or callback_lock */
+/* This function may be called only under socket lock or callback_lock or rcu_read_lock */
 
 int sock_wake_async(struct socket *sock, int how, int band)
 {
-	if (!sock || !sock->fasync_list)
+	struct socket_wq *wq;
+
+	if (!sock)
 		return -1;
+	rcu_read_lock();
+	wq = rcu_dereference(sock->wq);
+	if (!wq || !wq->fasync_list) {
+		rcu_read_unlock();
+		return -1;
+	}
 	switch (how) {
 	case SOCK_WAKE_WAITD:
 		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1108,11 +1132,12 @@
 		/* fall through */
 	case SOCK_WAKE_IO:
 call_kill:
-		kill_fasync(&sock->fasync_list, SIGIO, band);
+		kill_fasync(&wq->fasync_list, SIGIO, band);
 		break;
 	case SOCK_WAKE_URG:
-		kill_fasync(&sock->fasync_list, SIGURG, band);
+		kill_fasync(&wq->fasync_list, SIGURG, band);
 	}
+	rcu_read_unlock();
 	return 0;
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87c0360..fef2cc5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@
 
 static void unix_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
 	if (unix_writable(sk)) {
-		if (sk_has_sleeper(sk))
-			wake_up_interruptible_sync(sk_sleep(sk));
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible_sync(&wq->wait);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@
 				skpair->sk_err = ECONNRESET;
 			unix_state_unlock(skpair);
 			skpair->sk_state_change(skpair);
-			read_lock(&skpair->sk_callback_lock);
 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
-			read_unlock(&skpair->sk_callback_lock);
 		}
 		sock_put(skpair); /* It may now die */
 		unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@
 	newsk->sk_peercred.pid	= task_tgid_vnr(current);
 	current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
 	newu = unix_sk(newsk);
-	newsk->sk_sleep		= &newu->peer_wait;
+	newsk->sk_wq		= &newu->peer_wq;
 	otheru = unix_sk(other);
 
 	/* copy address information from listening to new sock*/
@@ -1931,12 +1932,10 @@
 			other->sk_shutdown |= peer_mode;
 			unix_state_unlock(other);
 			other->sk_state_change(other);
-			read_lock(&other->sk_callback_lock);
 			if (peer_mode == SHUTDOWN_MASK)
 				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
 			else if (peer_mode & RCV_SHUTDOWN)
 				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-			read_unlock(&other->sk_callback_lock);
 		}
 		if (other)
 			sock_put(other);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3..c8df6fd 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@
 	}
 }
 
-static inline struct sk_buff *sock_queue_head(struct sock *sk)
-{
-	return (struct sk_buff *)&sk->sk_receive_queue;
-}
-
-#define receive_queue_for_each_skb(sk, next, skb) \
-	for (skb = sock_queue_head(sk)->next, next = skb->next; \
-	     skb != sock_queue_head(sk); skb = next, next = skb->next)
-
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 			  struct sk_buff_head *hitlist)
 {
@@ -169,7 +160,7 @@
 	struct sk_buff *next;
 
 	spin_lock(&x->sk_receive_queue.lock);
-	receive_queue_for_each_skb(x, next, skb) {
+	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
 		/*
 		 *	Do we have file descriptors ?
 		 */
@@ -225,7 +216,7 @@
 		 * and perform a scan on them as well.
 		 */
 		spin_lock(&x->sk_receive_queue.lock);
-		receive_queue_for_each_skb(x, next, skb) {
+		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
 			u = unix_sk(skb->sk);
 
 			/*