diff --git a/MAINTAINERS b/MAINTAINERS
index c8c0874..9b893d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2491,7 +2491,7 @@ S:        Maintained
 F:     drivers/net/eexpress.*
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <[email protected]>
+M:     Stephen Hemminger <[email protected]>
 L:     [email protected]
 L:     [email protected]
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -4327,7 +4327,7 @@ S:        Supported
 F:     drivers/infiniband/hw/nes/
 
 NETEM NETWORK EMULATOR
-M:     Stephen Hemminger <[email protected]>
+M:     Stephen Hemminger <[email protected]>
 L:     [email protected]
 S:     Maintained
 F:     net/sched/sch_netem.c
@@ -5779,7 +5779,7 @@ S:        Maintained
 F:     drivers/usb/misc/sisusbvga/
 
 SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-M:     Stephen Hemminger <[email protected]>
+M:     Stephen Hemminger <[email protected]>
 L:     [email protected]
 S:     Maintained
 F:     drivers/net/skge.*
diff --git a/Makefile b/Makefile
index 18c22d7..1a4a8cd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 0
-SUBLEVEL = 63
+SUBLEVEL = 64
 EXTRAVERSION =
 NAME = Sneaky Weasel
 
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 077735e..b2778e7 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE  
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE  
 
-typedef volatile u_int  freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int   rreg_t;
 
 typedef struct _ffredn_t {
-        freg_t  idlehead_high;  /* Idle cell header (high)              */
-        freg_t  idlehead_low;   /* Idle cell header (low)               */
-        freg_t  maxrate;        /* Maximum rate                         */
-        freg_t  stparms;        /* Traffic Management Parameters        */
-        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
-        freg_t  rm_type;        /*                                      */
-        u_int   filler5[0x17 - 0x06];
-        freg_t  cmd_reg;        /* Command register                     */
-        u_int   filler18[0x20 - 0x18];
-        freg_t  cbr_base;       /* CBR Pointer Base                     */
-        freg_t  vbr_base;       /* VBR Pointer Base                     */
-        freg_t  abr_base;       /* ABR Pointer Base                     */
-        freg_t  ubr_base;       /* UBR Pointer Base                     */
-        u_int   filler24;
-        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */
-        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */
-        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */
-        freg_t  vct_base;       /* Main VC Table Base                   */
-        freg_t  vcte_base;      /* Extended Main VC Table Base          */
-        u_int   filler2a[0x2C - 0x2A];
-        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */
-        freg_t  cbr_tab_end;    /* CBR Table End                        */
-        freg_t  cbr_pointer;    /* CBR Pointer                          */
-        u_int   filler2f[0x30 - 0x2F];
-        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */
-        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */
-        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */
-        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */
-        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/
-        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */
-        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
-        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
-        u_int   filler38[0x40 - 0x38];
-        freg_t  queue_base;     /* Base address for PRQ and TCQ         */
-        freg_t  desc_base;      /* Base address of descriptor table     */
-        u_int   filler42[0x45 - 0x42];
-        freg_t  mode_reg_0;     /* Mode register 0                      */
-        freg_t  mode_reg_1;     /* Mode register 1                      */
-        freg_t  intr_status_reg;/* Interrupt Status register            */
-        freg_t  mask_reg;       /* Mask Register                        */
-        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */
-        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */
-        freg_t  state_reg;      /* Status register                      */
-        u_int   filler4c[0x58 - 0x4c];
-        freg_t  curr_desc_num;  /* Contains the current descriptor num  */
-        freg_t  next_desc;      /* Next descriptor                      */
-        freg_t  next_vc;        /* Next VC                              */
-        u_int   filler5b[0x5d - 0x5b];
-        freg_t  present_slot_cnt;/* Present slot count                  */
-        u_int   filler5e[0x6a - 0x5e];
-        freg_t  new_desc_num;   /* New descriptor number                */
-        freg_t  new_vc;         /* New VC                               */
-        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */
-        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */
-        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */
-        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */
-        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */
-        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */
-        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */
-        freg_t  cbr_vc;         /* CBR VC                               */
-        freg_t  vbr_sb_vc;      /* VBR SB VC                            */
-        freg_t  abr_sb_vc;      /* ABR SB VC                            */
-        freg_t  ubr_sb_vc;      /* UBR SB VC                            */
-        freg_t  vbr_next_link;  /* VBR next link                        */
-        freg_t  abr_next_link;  /* ABR next link                        */
-        freg_t  ubr_next_link;  /* UBR next link                        */
-        u_int   filler7a[0x7c-0x7a];
-        freg_t  out_rate_head;  /* Out of rate head                     */
-        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
-        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */
-        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
-        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
+       ffreg_t idlehead_high;  /* Idle cell header (high)              */
+       ffreg_t idlehead_low;   /* Idle cell header (low)               */
+       ffreg_t maxrate;        /* Maximum rate                         */
+       ffreg_t stparms;        /* Traffic Management Parameters        */
+       ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+       ffreg_t rm_type;        /*                                      */
+       u_int   filler5[0x17 - 0x06];
+       ffreg_t cmd_reg;        /* Command register                     */
+       u_int   filler18[0x20 - 0x18];
+       ffreg_t cbr_base;       /* CBR Pointer Base                     */
+       ffreg_t vbr_base;       /* VBR Pointer Base                     */
+       ffreg_t abr_base;       /* ABR Pointer Base                     */
+       ffreg_t ubr_base;       /* UBR Pointer Base                     */
+       u_int   filler24;
+       ffreg_t vbrwq_base;     /* VBR Wait Queue Base                  */
+       ffreg_t abrwq_base;     /* ABR Wait Queue Base                  */
+       ffreg_t ubrwq_base;     /* UBR Wait Queue Base                  */
+       ffreg_t vct_base;       /* Main VC Table Base                   */
+       ffreg_t vcte_base;      /* Extended Main VC Table Base          */
+       u_int   filler2a[0x2C - 0x2A];
+       ffreg_t cbr_tab_beg;    /* CBR Table Begin                      */
+       ffreg_t cbr_tab_end;    /* CBR Table End                        */
+       ffreg_t cbr_pointer;    /* CBR Pointer                          */
+       u_int   filler2f[0x30 - 0x2F];
+       ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address     */
+       ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address       */
+       ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+       ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+       ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+       ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+       ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+       ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+       u_int   filler38[0x40 - 0x38];
+       ffreg_t queue_base;     /* Base address for PRQ and TCQ         */
+       ffreg_t desc_base;      /* Base address of descriptor table     */
+       u_int   filler42[0x45 - 0x42];
+       ffreg_t mode_reg_0;     /* Mode register 0                      */
+       ffreg_t mode_reg_1;     /* Mode register 1                      */
+       ffreg_t intr_status_reg;/* Interrupt Status register            */
+       ffreg_t mask_reg;       /* Mask Register                        */
+       ffreg_t cell_ctr_high1; /* Total cell transfer count (high)     */
+       ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low)      */
+       ffreg_t state_reg;      /* Status register                      */
+       u_int   filler4c[0x58 - 0x4c];
+       ffreg_t curr_desc_num;  /* Contains the current descriptor num  */
+       ffreg_t next_desc;      /* Next descriptor                      */
+       ffreg_t next_vc;        /* Next VC                              */
+       u_int   filler5b[0x5d - 0x5b];
+       ffreg_t present_slot_cnt;/* Present slot count                  */
+       u_int   filler5e[0x6a - 0x5e];
+       ffreg_t new_desc_num;   /* New descriptor number                */
+       ffreg_t new_vc;         /* New VC                               */
+       ffreg_t sched_tbl_ptr;  /* Schedule table pointer               */
+       ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer         */
+       ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer          */
+       ffreg_t abrwq_wptr;     /* ABR wait queue write pointer         */
+       ffreg_t abrwq_rptr;     /* ABR wait queue read pointer          */
+       ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer         */
+       ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer          */
+       ffreg_t cbr_vc;         /* CBR VC                               */
+       ffreg_t vbr_sb_vc;      /* VBR SB VC                            */
+       ffreg_t abr_sb_vc;      /* ABR SB VC                            */
+       ffreg_t ubr_sb_vc;      /* UBR SB VC                            */
+       ffreg_t vbr_next_link;  /* VBR next link                        */
+       ffreg_t abr_next_link;  /* ABR next link                        */
+       ffreg_t ubr_next_link;  /* UBR next link                        */
+       u_int   filler7a[0x7c-0x7a];
+       ffreg_t out_rate_head;  /* Out of rate head                     */
+       u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
+       ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+       ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+       u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
 } ffredn_t;
 
 typedef struct _rfredn_t {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fb68b12..a95256a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1750,7 +1750,8 @@ static void virtcons_remove(struct virtio_device *vdev)
        /* Disable interrupts for vqs */
        vdev->config->reset(vdev);
        /* Finish up work that's lined up */
-       cancel_work_sync(&portdev->control_work);
+       if (use_multiport(portdev))
+               cancel_work_sync(&portdev->control_work);
 
        list_for_each_entry_safe(port, port2, &portdev->ports, list)
                unplug_port(port);
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index d1dde65..483ac00 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -263,6 +263,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
                CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
                CAPIMSG_CONTROL(data));
        l -= 12;
+       if (l <= 0)
+               return;
        dbgline = kmalloc(3*l, GFP_ATOMIC);
        if (!dbgline)
                return;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f..d0893e4 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
        skb_orphan(skb);
 
+       /* Before queueing this packet to netif_rx(),
+        * make sure dst is refcounted.
+        */
+       skb_dst_force(skb);
+
        skb->protocol = eth_type_trans(skb, dev);
 
        /* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 8f61237..80b230e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -5203,13 +5203,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                        dev->stats.rx_bytes += pkt_size;
                        dev->stats.rx_packets++;
                }
-
-               /* Work around for AMD plateform. */
-               if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
-                   (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
-                       desc->opts2 = 0;
-                       cur_rx++;
-               }
        }
 
        count = cur_rx - tp->cur_rx;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 10a99e4..eaa24fa8 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -996,14 +996,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 }
 
-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
-       tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
-                            MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
-                            MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+       u32 val;
+       int err;
 
-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
-       tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
-                            MII_TG3_AUXCTL_ACTL_TX_6DB);
+       err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+       if (err)
+               return err;
+
+       if (enable)
+               val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+       else
+               val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+       err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+                                  val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+       return err;
+}
 
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
@@ -1775,7 +1787,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
 
        otp = tp->phy_otp;
 
-       if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+       if (tg3_phy_toggle_auxctl_smdsp(tp, true))
                return;
 
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -1800,7 +1812,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 
-       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+       tg3_phy_toggle_auxctl_smdsp(tp, false);
 }
 
 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -1848,9 +1860,9 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
-           !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+           !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
-               TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
 
        val = tr32(TG3_CPMU_EEE_MODE);
@@ -1995,7 +2007,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));
 
-               err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+               err = tg3_phy_toggle_auxctl_smdsp(tp, true);
                if (err)
                        return err;
 
@@ -2016,7 +2028,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 
-       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+       tg3_phy_toggle_auxctl_smdsp(tp, false);
 
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
 
@@ -2105,10 +2117,10 @@ static int tg3_phy_reset(struct tg3 *tp)
 
 out:
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
-           !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+           !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
-               TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
 
        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2117,14 +2129,14 @@ out:
        }
 
        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
-               if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+               if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
-                       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+                       tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
-               if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+               if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2133,7 +2145,7 @@ out:
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 
-                       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+                       tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }
 
@@ -2981,7 +2993,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 
-       err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+       err = tg3_phy_toggle_auxctl_smdsp(tp, true);
        if (!err) {
                u32 err2;
 
@@ -3008,7 +3020,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                        val |= MDIO_AN_EEE_ADV_1000T;
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
 
-               err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
                if (!err)
                        err = err2;
        }
@@ -5662,6 +5674,9 @@ static void tg3_poll_controller(struct net_device *dev)
        int i;
        struct tg3 *tp = netdev_priv(dev);
 
+       if (tg3_irq_sync(tp))
+               return;
+
        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
 }
@@ -14981,6 +14996,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
+       tp->irq_sync = 1;
 
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e4272b9..dbcf5a9 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        WARN_ON(skb_queue_empty(&rx_queue));
        while (!skb_queue_empty(&rx_queue)) {
                _skb = skb_dequeue(&rx_queue);
-               _rtl_usb_rx_process_agg(hw, skb);
-               ieee80211_rx_irqsafe(hw, skb);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
        }
 }
 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 161f207..810a472 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -152,6 +152,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1825629..5925e0b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -342,17 +342,22 @@ err:
        return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
        struct net_device *dev = vif->dev;
-       if (netif_carrier_ok(dev)) {
-               rtnl_lock();
-               netif_carrier_off(dev); /* discard queued packets */
-               if (netif_running(dev))
-                       xenvif_down(vif);
-               rtnl_unlock();
-               xenvif_put(vif);
-       }
+
+       rtnl_lock();
+       netif_carrier_off(dev); /* discard queued packets */
+       if (netif_running(dev))
+               xenvif_down(vif);
+       rtnl_unlock();
+       xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+       if (netif_carrier_ok(vif->dev))
+               xenvif_carrier_off(vif);
 
        atomic_dec(&vif->refcnt);
        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0e4851b..9068d32 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -143,7 +143,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
        atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status);
 static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8       st);
@@ -838,7 +839,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
        do {
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               if (cons >= end)
+               if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
@@ -847,6 +848,13 @@ static void netbk_tx_err(struct xenvif *vif,
        xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+       netdev_err(vif->dev, "fatal error; disabling device\n");
+       xenvif_carrier_off(vif);
+       xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txp,
@@ -860,19 +868,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
        do {
                if (frags >= work_to_do) {
-                       netdev_dbg(vif->dev, "Need more frags\n");
+                       netdev_err(vif->dev, "Need more frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                       netdev_dbg(vif->dev, "Too many frags\n");
+                       netdev_err(vif->dev, "Too many frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
                       sizeof(*txp));
                if (txp->size > first->size) {
-                       netdev_dbg(vif->dev, "Frags galore\n");
+                       netdev_err(vif->dev, "Frag is bigger than frame.\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
@@ -880,8 +891,9 @@ static int netbk_count_requests(struct xenvif *vif,
                frags++;
 
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+                       netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                                 txp->offset, txp->size);
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
        } while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -925,7 +937,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                pending_idx = netbk->pending_ring[index];
                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
                if (!page)
-                       return NULL;
+                       goto err;
 
                netbk->mmap_pages[pending_idx] = page;
 
@@ -949,6 +961,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
        }
 
        return gop;
+err:
+       /* Unwind, freeing all pages and sending error responses. */
+       while (i-- > start) {
+               xen_netbk_idx_release(netbk, (unsigned long)shinfo->frags[i].page,
+                                     XEN_NETIF_RSP_ERROR);
+       }
+       /* The head too, if necessary. */
+       if (start)
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+       return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -957,30 +980,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
        struct gnttab_copy *gop = *gopp;
        int pending_idx = *((u16 *)skb->data);
-       struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-       struct xenvif *vif = pending_tx_info[pending_idx].vif;
-       struct xen_netif_tx_request *txp;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
 
        /* Check status of header. */
        err = gop->status;
-       if (unlikely(err)) {
-               pending_ring_idx_t index;
-               index = pending_index(netbk->pending_prod++);
-               txp = &pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
-       }
+       if (unlikely(err))
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
-       start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+       start = ((unsigned long)shinfo->frags[0].page == pending_idx);
 
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t index;
 
                pending_idx = (unsigned long)shinfo->frags[i].page;
 
@@ -989,16 +1002,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xen_netbk_idx_release(netbk, pending_idx);
+                               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }
 
                /* Error on this fragment: respond to client with an error. */
-               txp = &netbk->pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               index = pending_index(netbk->pending_prod++);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1006,10 +1015,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = (unsigned long)shinfo->frags[i].page;
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
@@ -1044,7 +1053,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
                /* Take an extra reference to offset xen_netbk_idx_release */
                get_page(netbk->mmap_pages[pending_idx]);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
        }
 }
 
@@ -1057,7 +1066,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       netdev_dbg(vif->dev, "Missing extra info\n");
+                       netdev_err(vif->dev, "Missing extra info\n");
+                       netbk_fatal_tx_err(vif);
                        return -EBADR;
                }
 
@@ -1066,8 +1076,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        vif->tx.req_cons = ++cons;
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
+                       netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -1083,13 +1094,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
                             struct xen_netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
-               netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+               netdev_err(vif->dev, "GSO size must not be zero.\n");
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-               netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
@@ -1226,9 +1239,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* Get a netif from the list with work to do. */
                vif = poll_net_schedule_list(netbk);
+               /* This can sometimes happen because the test of
+                * list_empty(net_schedule_list) at the top of the
+                * loop is unlocked.  Just go back and have another
+                * look.
+                */
                if (!vif)
                        continue;
 
+               if (vif->tx.sring->req_prod - vif->tx.req_cons >
+                   XEN_NETIF_TX_RING_SIZE) {
+                       netdev_err(vif->dev,
+                                  "Impossible number of requests. "
+                                  "req_prod %d, req_cons %d, size %ld\n",
+                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  XEN_NETIF_TX_RING_SIZE);
+                       netbk_fatal_tx_err(vif);
+                       continue;
+               }
+
                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
                if (!work_to_do) {
                        xenvif_put(vif);
@@ -1256,17 +1285,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        work_to_do = xen_netbk_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
-                       if (unlikely(work_to_do < 0)) {
-                               netbk_tx_err(vif, &txreq, idx);
+                       if (unlikely(work_to_do < 0))
                                continue;
-                       }
                }
 
                ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-               if (unlikely(ret < 0)) {
-                       netbk_tx_err(vif, &txreq, idx - ret);
+               if (unlikely(ret < 0))
                        continue;
-               }
+
                idx += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1278,11 +1304,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       netbk_tx_err(vif, &txreq, idx);
+                       netbk_fatal_tx_err(vif);
                        continue;
                }
 
@@ -1310,8 +1336,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
                        if (netbk_set_skb_gso(vif, skb, gso)) {
+                               /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               netbk_tx_err(vif, &txreq, idx);
                                continue;
                        }
                }
@@ -1412,7 +1438,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1467,7 +1493,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status)
 {
        struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
@@ -1481,7 +1508,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
        vif = pending_tx_info->vif;
 
-       make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+       make_tx_response(vif, &pending_tx_info->req, status);
 
        index = pending_index(netbk->pending_prod++);
        netbk->pending_ring[index] = pending_idx;
diff --git a/fs/splice.c b/fs/splice.c
index 9d89008..ea92b7c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -693,8 +693,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
                return -EINVAL;
 
        more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
-       if (sd->len < sd->total_len)
+
+       if (sd->len < sd->total_len && pipe->nrbufs > 1)
                more |= MSG_SENDPAGE_NOTLAST;
+
        return file->f_op->sendpage(file, buf->page, buf->offset,
                                    sd->len, &pos, more);
 }
diff --git a/kernel/resource.c b/kernel/resource.c
index b29b83d..d005cd3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -736,6 +736,7 @@ static void __init __reserve_region_with_split(struct resource *root,
        struct resource *parent = root;
        struct resource *conflict;
        struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+       struct resource *next_res = NULL;
 
        if (!res)
                return;
@@ -745,21 +746,46 @@ static void __init __reserve_region_with_split(struct resource *root,
        res->end = end;
        res->flags = IORESOURCE_BUSY;
 
-       conflict = __request_resource(parent, res);
-       if (!conflict)
-               return;
+       while (1) {
 
-       /* failed, split and try again */
-       kfree(res);
+               conflict = __request_resource(parent, res);
+               if (!conflict) {
+                       if (!next_res)
+                               break;
+                       res = next_res;
+                       next_res = NULL;
+                       continue;
+               }
 
-       /* conflict covered whole area */
-       if (conflict->start <= start && conflict->end >= end)
-               return;
+               /* conflict covered whole area */
+               if (conflict->start <= res->start &&
+                               conflict->end >= res->end) {
+                       kfree(res);
+                       WARN_ON(next_res);
+                       break;
+               }
+
+               /* failed, split and try again */
+               if (conflict->start > res->start) {
+                       end = res->end;
+                       res->end = conflict->start - 1;
+                       if (conflict->end < end) {
+                               next_res = kzalloc(sizeof(*next_res),
+                                               GFP_ATOMIC);
+                               if (!next_res) {
+                                       kfree(res);
+                                       break;
+                               }
+                               next_res->name = name;
+                               next_res->start = conflict->end + 1;
+                               next_res->end = end;
+                               next_res->flags = IORESOURCE_BUSY;
+                       }
+               } else {
+                       res->start = conflict->end + 1;
+               }
+       }
 
-       if (conflict->start > start)
-               __reserve_region_with_split(root, start, conflict->start-1, 
name);
-       if (conflict->end < end)
-               __reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3dc7f54..c3d0729 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -233,6 +233,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
        struct net_device *dev = skb->dev;
        u32 len;
 
+       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+               goto inhdr_error;
+
        iph = ip_hdr(skb);
        opt = &(IPCB(skb)->opt);
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 01890e1..4479fd1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ee02ab9..758588d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -599,7 +599,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_TTL:
                if (optlen < 1)
                        goto e_inval;
-               if (val != -1 && (val < 0 || val > 255))
+               if (val != -1 && (val < 1 || val > 255))
                        goto e_inval;
                inet->uc_ttl = val;
                break;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 56545b9..7f4e391 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3588,6 +3588,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
                }
        } else {
                if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       if (!tcp_packets_in_flight(tp)) {
+                               tcp_enter_frto_loss(sk, 2, flag);
+                               return true;
+                       }
+
                        /* Prevent sending of new data. */
                        tp->snd_cwnd = min(tp->snd_cwnd,
                                           tcp_packets_in_flight(tp));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 550fec3..d5b5f56 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -802,7 +802,8 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_raw(&rt->dst) &&
+           !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3fdac77..62b86f0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1169,11 +1169,9 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
 void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
                                        bool tell_ap);
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
-                                   bool offchannel_ps_enable);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
-                                bool offchannel_ps_disable);
+                                bool enable_beaconing);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
 /* interface handling */
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c55eb9d..ecc4922 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
-                                   bool offchannel_ps_enable)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -128,8 +127,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        netif_tx_stop_all_queues(sdata->dev);
-                       if (offchannel_ps_enable &&
-                           (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+                       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                            sdata->u.mgd.associated)
                                ieee80211_offchannel_ps_enable(sdata, true);
                }
@@ -155,8 +153,7 @@ void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
 }
 
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
-                                bool offchannel_ps_disable)
+                                bool enable_beaconing)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -166,11 +163,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        continue;
 
                /* Tell AP we're back */
-               if (offchannel_ps_disable &&
-                   sdata->vif.type == NL80211_IFTYPE_STATION) {
-                       if (sdata->u.mgd.associated)
-                               ieee80211_offchannel_ps_disable(sdata);
-               }
+               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+                   sdata->u.mgd.associated)
+                       ieee80211_offchannel_ps_disable(sdata);
 
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 669d2e3..7c75741 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -314,7 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
                if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
                        enable_beacons = true;
 
-               ieee80211_offchannel_return(local, enable_beacons, true);
+               ieee80211_offchannel_return(local, enable_beacons);
        }
 
        ieee80211_recalc_idle(local);
@@ -563,7 +563,7 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca
        /* PS will already be in off-channel mode,
         * we do that once at the beginning of scanning.
         */
-       ieee80211_offchannel_stop_vifs(local, false);
+       ieee80211_offchannel_stop_vifs(local);
 
        /*
         * What if the nullfunc frames didn't arrive?
@@ -594,7 +594,7 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
         * in off-channel state..will put that back
         * on-channel at the end of scanning.
         */
-       ieee80211_offchannel_return(local, true, false);
+       ieee80211_offchannel_return(local, true);
 
        *next_delay = HZ / 5;
        local->next_scan_state = SCAN_DECISION;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 52b758d..c9acfda 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -973,16 +973,14 @@ static void ieee80211_work_work(struct work_struct *work)
                        if (on_oper_chan != on_oper_chan2) {
                                if (on_oper_chan2) {
                                        /* going off oper channel, PS too */
-                                       ieee80211_offchannel_stop_vifs(local,
-                                                                      true);
+                                       ieee80211_offchannel_stop_vifs(local);
                                        ieee80211_hw_config(local, 0);
                                } else {
                                        /* going on channel, but leave PS
                                         * off-channel. */
                                        ieee80211_hw_config(local, 0);
                                        ieee80211_offchannel_return(local,
-                                                                   true,
-                                                                   false);
+                                                                   true);
                                }
                        } else if (tmp_chan_changed)
                                /* Still off-channel, but on some other
@@ -1085,7 +1083,7 @@ static void ieee80211_work_work(struct work_struct *work)
                 * beaconing if we were already on-oper-channel
                 * as a future optimization.
                 */
-               ieee80211_offchannel_return(local, true, true);
+               ieee80211_offchannel_return(local, true);
 
                /* give connection some time to breathe */
                run_again(local, jiffies + HZ/2);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ab5a02..4058afe 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1349,13 +1349,15 @@ static int packet_release(struct socket *sock)
 
        packet_flush_mclist(sk);
 
-       memset(&req, 0, sizeof(req));
-
-       if (po->rx_ring.pg_vec)
+       if (po->rx_ring.pg_vec) {
+               memset(&req, 0, sizeof(req));
                packet_set_ring(sk, &req, 1, 0);
+       }
 
-       if (po->tx_ring.pg_vec)
+       if (po->tx_ring.pg_vec) {
+               memset(&req, 0, sizeof(req));
                packet_set_ring(sk, &req, 1, 1);
+       }
 
        synchronize_net();
        /*
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index c8cc24e..dbe5870a 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 /* Final destructor for endpoint.  */
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
+       int i;
+
        SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
 
        /* Free up the HMAC transform. */
@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
 
+       for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+               memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
        /* Remove and free the port */
        if (sctp_sk(ep->base.sk)->bind_hash)
                sctp_put_port(ep->base.sk);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1f2938f..2325ce6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 
 /* Free the outqueue structure and any related pending chunks.
  */
-void sctp_outq_teardown(struct sctp_outq *q)
+static void __sctp_outq_teardown(struct sctp_outq *q)
 {
        struct sctp_transport *transport;
        struct list_head *lchunk, *temp;
@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
                sctp_chunk_free(chunk);
        }
 
-       q->error = 0;
-
        /* Throw away any leftover control chunks. */
        list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
                list_del_init(&chunk->list);
@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
        }
 }
 
+void sctp_outq_teardown(struct sctp_outq *q)
+{
+       __sctp_outq_teardown(q);
+       sctp_outq_init(q->asoc, q);
+}
+
 /* Free the outqueue structure and any related pending chunks.  */
 void sctp_outq_free(struct sctp_outq *q)
 {
        /* Throw away leftover chunks. */
-       sctp_outq_teardown(q);
+       __sctp_outq_teardown(q);
 
        /* If we were kmalloc()'d, free the memory.  */
        if (q->malloced)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8ac6d0b..6b766cd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3304,7 +3304,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
 out:
-       kfree(authkey);
+       kzfree(authkey);
        return ret;
 }
 
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
