commit:     eaef47e8b89daedd16983012997986b33004186d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 18 20:59:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 18 20:59:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eaef47e8

Linux patch 3.12.69

 0000_README              |    4 +
 1068_linux-3.12.69.patch | 1536 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1540 insertions(+)

diff --git a/0000_README b/0000_README
index 866b122..bb6a8f9 100644
--- a/0000_README
+++ b/0000_README
@@ -310,6 +310,10 @@ Patch:  1066_linux-3.12.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.67
 
 Patch:  1067_linux-3.12.68.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.68
+
+Patch:  1068_linux-3.12.69.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.69

diff --git a/1068_linux-3.12.69.patch b/1068_linux-3.12.69.patch
new file mode 100644
index 0000000..504780f
--- /dev/null
+++ b/1068_linux-3.12.69.patch
@@ -0,0 +1,1536 @@
+diff --git a/Makefile b/Makefile
+index 6d86f39be8ce..f355c0e24cd6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index e751dbc527e2..7e9dfc4fcc23 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -88,7 +88,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+       sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+       /* 1. Make sure we are not getting garbage from the user */
+-      if (!invalid_frame_pointer(sf, sizeof(*sf)))
++      if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv_and_exit;
+ 
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+@@ -149,7 +149,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+       synchronize_user_stack();
+       sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-      if (!invalid_frame_pointer(sf, sizeof(*sf)))
++      if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+ 
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 4650a3840305..a648a8e66f0d 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -807,8 +807,10 @@ struct mdesc_mblock {
+ };
+ static struct mdesc_mblock *mblocks;
+ static int num_mblocks;
++static int find_numa_node_for_addr(unsigned long pa,
++                                 struct node_mem_mask *pnode_mask);
+ 
+-static unsigned long ra_to_pa(unsigned long addr)
++static unsigned long __init ra_to_pa(unsigned long addr)
+ {
+       int i;
+ 
+@@ -824,8 +826,11 @@ static unsigned long ra_to_pa(unsigned long addr)
+       return addr;
+ }
+ 
+-static int find_node(unsigned long addr)
++static int __init find_node(unsigned long addr)
+ {
++      static bool search_mdesc = true;
++      static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
++      static int last_index;
+       int i;
+ 
+       addr = ra_to_pa(addr);
+@@ -835,13 +840,30 @@ static int find_node(unsigned long addr)
+               if ((addr & p->mask) == p->val)
+                       return i;
+       }
+-      /* The following condition has been observed on LDOM guests.*/
+-      WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
+-              " rule. Some physical memory will be owned by node 0.");
+-      return 0;
++      /* The following condition has been observed on LDOM guests because
++       * node_masks only contains the best latency mask and value.
++       * LDOM guest's mdesc can contain a single latency group to
++       * cover multiple address range. Print warning message only if the
++       * address cannot be found in node_masks nor mdesc.
++       */
++      if ((search_mdesc) &&
++          ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
++              /* find the available node in the mdesc */
++              last_index = find_numa_node_for_addr(addr, &last_mem_mask);
++              numadbg("find_node: latency group for address 0x%lx is %d\n",
++                      addr, last_index);
++              if ((last_index < 0) || (last_index >= num_node_masks)) {
++                      /* WARN_ONCE() and use default group 0 */
+                      WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
++                      search_mdesc = false;
++                      last_index = 0;
++              }
++      }
++
++      return last_index;
+ }
+ 
+-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
++static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
+ {
+       *nid = find_node(start);
+       start += PAGE_SIZE;
+@@ -1150,6 +1172,41 @@ static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
+       return NULL;
+ }
+ 
++static int find_numa_node_for_addr(unsigned long pa,
++                                 struct node_mem_mask *pnode_mask)
++{
++      struct mdesc_handle *md = mdesc_grab();
++      u64 node, arc;
++      int i = 0;
++
++      node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
++      if (node == MDESC_NODE_NULL)
++              goto out;
++
++      mdesc_for_each_node_by_name(md, node, "group") {
++              mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
++                      u64 target = mdesc_arc_target(md, arc);
++                      struct mdesc_mlgroup *m = find_mlgroup(target);
++
++                      if (!m)
++                              continue;
++                      if ((pa & m->mask) == m->match) {
++                              if (pnode_mask) {
++                                      pnode_mask->mask = m->mask;
++                                      pnode_mask->val = m->match;
++                              }
++                              mdesc_release(md);
++                              return i;
++                      }
++              }
++              i++;
++      }
++
++out:
++      mdesc_release(md);
++      return -1;
++}
++
+ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
+                                     int index)
+ {
+diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
+index 5d10642db63e..1edda6603cd9 100644
+--- a/arch/tile/kernel/time.c
++++ b/arch/tile/kernel/time.c
+@@ -216,8 +216,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
+  */
+ unsigned long long sched_clock(void)
+ {
+-      return clocksource_cyc2ns(get_cycles(),
+-                                sched_clock_mult, SCHED_CLOCK_SHIFT);
++      return mult_frac(get_cycles(),
++                       sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
+ }
+ 
+ int setup_profiling_timer(unsigned int multiplier)
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 30a2aa3782fa..879e67acf463 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -564,7 +564,7 @@ early_idt_handler_common:
+       movl %eax,%ds
+       movl %eax,%es
+ 
+-      cmpl $(__KERNEL_CS),32(%esp)
++      cmpw $(__KERNEL_CS),32(%esp)
+       jne 10f
+ 
+       leal 28(%esp),%eax      # Pointer to %eip
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c5db2a43e730..ef486d90f318 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -428,6 +428,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+ static void mwait_idle(void)
+ {
+       if (!current_set_polling_and_test()) {
++              trace_cpu_idle_rcuidle(1, smp_processor_id());
+               if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+                       mb();
+                       clflush((void *)&current_thread_info()->flags);
+@@ -439,6 +440,7 @@ static void mwait_idle(void)
+                       __sti_mwait(0, 0);
+               else
+                       local_irq_enable();
++              trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+       } else
+               local_irq_enable();
+       __current_clr_polling();
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index cad86cd56f82..77d373211053 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1985,14 +1985,8 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+       int rc;
+-      unsigned short sel, old_sel;
+-      struct desc_struct old_desc, new_desc;
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+-
+-      /* Assignment of RIP may only fail in 64-bit mode */
+-      if (ctxt->mode == X86EMUL_MODE_PROT64)
+-              ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
+-                               VCPU_SREG_CS);
++      unsigned short sel;
++      struct desc_struct new_desc;
+ 
+       memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+@@ -2001,12 +1995,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+               return rc;
+ 
+       rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+-      if (rc != X86EMUL_CONTINUE) {
+-              WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+-              /* assigning eip failed; restore the old cs */
+-              ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
+-              return rc;
+-      }
++      /* Error handling is not implemented. */
++      if (rc != X86EMUL_CONTINUE)
++              return X86EMUL_UNHANDLEABLE;
++
+       return rc;
+ }
+ 
+@@ -2063,14 +2055,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+       int rc;
+       unsigned long eip, cs;
+-      u16 old_cs;
+       int cpl = ctxt->ops->cpl(ctxt);
+-      struct desc_struct old_desc, new_desc;
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+-
+-      if (ctxt->mode == X86EMUL_MODE_PROT64)
+-              ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
+-                               VCPU_SREG_CS);
++      struct desc_struct new_desc;
+ 
+       rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+       if (rc != X86EMUL_CONTINUE)
+@@ -2085,10 +2071,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+       rc = assign_eip_far(ctxt, eip, new_desc.l);
+-      if (rc != X86EMUL_CONTINUE) {
+-              WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+-              ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+-      }
++      /* Error handling is not implemented. */
++      if (rc != X86EMUL_CONTINUE)
++              return X86EMUL_UNHANDLEABLE;
++
+       return rc;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index d988fff65ee5..ae2e03ea2371 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -277,6 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+                       atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+               atombios_blank_crtc(crtc, ATOM_DISABLE);
+               drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++              /* Make sure vblank interrupt is still enabled if needed */
++              radeon_irq_set(rdev);
+               radeon_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index bc73021d3596..ae0d7b1cb9aa 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+                       WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+               }
+               drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++              /* Make sure vblank interrupt is still enabled if needed */
++              radeon_irq_set(rdev);
+               radeon_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index e71372f86072..e6f18b241255 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -370,19 +370,57 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 
+       if (!irqstatus)
+               return IRQ_NONE;
+-      else if (irqstatus & AT91_TWI_RXRDY)
+-              at91_twi_read_next_byte(dev);
+-      else if (irqstatus & AT91_TWI_TXRDY)
+-              at91_twi_write_next_byte(dev);
+-
+-      /* catch error flags */
+-      dev->transfer_status |= status;
+ 
++      /*
++       * When a NACK condition is detected, the I2C controller sets the NACK,
++       * TXCOMP and TXRDY bits all together in the Status Register (SR).
++       *
++       * 1 - Handling NACK errors with CPU write transfer.
++       *
++       * In such case, we should not write the next byte into the Transmit
++       * Holding Register (THR) otherwise the I2C controller would start a new
++       * transfer and the I2C slave is likely to reply by another NACK.
++       *
++       * 2 - Handling NACK errors with DMA write transfer.
++       *
++       * By setting the TXRDY bit in the SR, the I2C controller also triggers
++       * the DMA controller to write the next data into the THR. Then the
++       * result depends on the hardware version of the I2C controller.
++       *
++       * 2a - Without support of the Alternative Command mode.
++       *
++       * This is the worst case: the DMA controller is triggered to write the
++       * next data into the THR, hence starting a new transfer: the I2C slave
++       * is likely to reply by another NACK.
++       * Concurrently, this interrupt handler is likely to be called to manage
++       * the first NACK before the I2C controller detects the second NACK and
++       * sets once again the NACK bit into the SR.
++       * When handling the first NACK, this interrupt handler disables the I2C
++       * controller interruptions, especially the NACK interrupt.
++       * Hence, the NACK bit is pending into the SR. This is why we should
++       * read the SR to clear all pending interrupts at the beginning of
++       * at91_do_twi_transfer() before actually starting a new transfer.
++       *
++       * 2b - With support of the Alternative Command mode.
++       *
++       * When a NACK condition is detected, the I2C controller also locks the
++       * THR (and sets the LOCK bit in the SR): even though the DMA controller
++       * is triggered by the TXRDY bit to write the next data into the THR,
++       * this data actually won't go on the I2C bus hence a second NACK is not
++       * generated.
++       */
+       if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
+               at91_disable_twi_interrupts(dev);
+               complete(&dev->cmd_complete);
++      } else if (irqstatus & AT91_TWI_RXRDY) {
++              at91_twi_read_next_byte(dev);
++      } else if (irqstatus & AT91_TWI_TXRDY) {
++              at91_twi_write_next_byte(dev);
+       }
+ 
++      /* catch error flags */
++      dev->transfer_status |= status;
++
+       return IRQ_HANDLED;
+ }
+ 
+@@ -390,6 +428,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ {
+       int ret;
+       bool has_unre_flag = dev->pdata->has_unre_flag;
++      unsigned sr;
+ 
+       /*
+        * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
+@@ -425,13 +464,16 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+       INIT_COMPLETION(dev->cmd_complete);
+       dev->transfer_status = 0;
+ 
++      /* Clear pending interrupts, such as NACK. */
++      sr = at91_twi_read(dev, AT91_TWI_SR);
++
+       if (!dev->buf_len) {
+               at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+               at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+       } else if (dev->msg->flags & I2C_M_RD) {
+               unsigned start_flags = AT91_TWI_START;
+ 
+-              if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
++              if (sr & AT91_TWI_RXRDY) {
+                       dev_err(dev->dev, "RXRDY still set!");
+                       at91_twi_read(dev, AT91_TWI_RHR);
+               }
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 32d5e40c6863..48b63e849067 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -198,8 +198,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+       if (!dev)
+               return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ 
+-      mutex_lock(&dev->lock);
+       mutex_lock(&mtd_table_mutex);
++      mutex_lock(&dev->lock);
+ 
+       if (dev->open)
+               goto unlock;
+@@ -223,8 +223,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ unlock:
+       dev->open++;
+-      mutex_unlock(&mtd_table_mutex);
+       mutex_unlock(&dev->lock);
++      mutex_unlock(&mtd_table_mutex);
+       blktrans_dev_put(dev);
+       return ret;
+ 
+@@ -234,8 +234,8 @@ error_release:
+ error_put:
+       module_put(dev->tr->owner);
+       kref_put(&dev->ref, blktrans_dev_release);
+-      mutex_unlock(&mtd_table_mutex);
+       mutex_unlock(&dev->lock);
++      mutex_unlock(&mtd_table_mutex);
+       blktrans_dev_put(dev);
+       return ret;
+ }
+@@ -247,8 +247,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+       if (!dev)
+               return;
+ 
+-      mutex_lock(&dev->lock);
+       mutex_lock(&mtd_table_mutex);
++      mutex_lock(&dev->lock);
+ 
+       if (--dev->open)
+               goto unlock;
+@@ -262,8 +262,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+               __put_mtd_device(dev->mtd);
+       }
+ unlock:
+-      mutex_unlock(&mtd_table_mutex);
+       mutex_unlock(&dev->lock);
++      mutex_unlock(&mtd_table_mutex);
+       blktrans_dev_put(dev);
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index e09a8c6f8536..798ca7be8c7f 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5197,6 +5197,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+ 
+ static void sky2_shutdown(struct pci_dev *pdev)
+ {
++      struct sky2_hw *hw = pci_get_drvdata(pdev);
++      int port;
++
++      for (port = 0; port < hw->ports; port++) {
++              struct net_device *ndev = hw->dev[port];
++
++              rtnl_lock();
++              if (netif_running(ndev)) {
++                      dev_close(ndev);
++                      netif_device_detach(ndev);
++              }
++              rtnl_unlock();
++      }
+       sky2_suspend(&pdev->dev);
+       pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+       pci_set_power_state(pdev, PCI_D3hot);
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 576c3236fa40..98ce4feb9a79 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -67,7 +67,7 @@ static struct cdev macvtap_cdev;
+ static const struct proto_ops macvtap_socket_ops;
+ 
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+-                    NETIF_F_TSO6)
++                    NETIF_F_TSO6 | NETIF_F_UFO)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+ #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+ 
+@@ -566,8 +566,6 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+                       gso_type = SKB_GSO_TCPV6;
+                       break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+-                      pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+-                                   current->comm);
+                       gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
+@@ -615,6 +613,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+               else if (sinfo->gso_type & SKB_GSO_TCPV6)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++              else if (sinfo->gso_type & SKB_GSO_UDP)
++                      vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+               else
+                       BUG();
+               if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -962,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+                       if (arg & TUN_F_TSO6)
+                               feature_mask |= NETIF_F_TSO6;
+               }
++
++              if (arg & TUN_F_UFO)
++                      feature_mask |= NETIF_F_UFO;
+       }
+ 
+       /* tun/tap driver inverts the usage for TSO offloads, where
+@@ -972,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+        * When user space turns off TSO, we turn off GSO/LRO so that
+        * user-space will not receive TSO frames.
+        */
+-      if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
++      if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+               features |= RX_OFFLOADS;
+       else
+               features &= ~RX_OFFLOADS;
+@@ -1073,7 +1076,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+       case TUNSETOFFLOAD:
+               /* let the user check for future flags */
+               if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+-                          TUN_F_TSO_ECN))
++                          TUN_F_TSO_ECN | TUN_F_UFO))
+                       return -EINVAL;
+ 
+               rtnl_lock();
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 46f9cb21ec56..813750d09680 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -173,7 +173,7 @@ struct tun_struct {
+       struct net_device       *dev;
+       netdev_features_t       set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+-                        NETIF_F_TSO6)
++                        NETIF_F_TSO6|NETIF_F_UFO)
+ 
+       int                     vnet_hdr_sz;
+       int                     sndbuf;
+@@ -1113,20 +1113,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+                       break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+-              {
+-                      static bool warned;
+-
+-                      if (!warned) {
+-                              warned = true;
+-                              netdev_warn(tun->dev,
+-                                          "%s: using disabled UFO feature; please fix this program\n",
+-                                          current->comm);
+-                      }
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+                       if (skb->protocol == htons(ETH_P_IPV6))
+                               ipv6_proxy_select_ident(skb);
+                       break;
+-              }
+               default:
+                       tun->dev->stats.rx_frame_errors++;
+                       kfree_skb(skb);
+@@ -1230,6 +1220,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+                               gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+                       else if (sinfo->gso_type & SKB_GSO_TCPV6)
+                               gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++                      else if (sinfo->gso_type & SKB_GSO_UDP)
++                              gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+                       else {
+                               pr_err("unexpected GSO type: "
+                                      "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1758,6 +1750,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
+                               features |= NETIF_F_TSO6;
+                       arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+               }
++
++              if (arg & TUN_F_UFO) {
++                      features |= NETIF_F_UFO;
++                      arg &= ~TUN_F_UFO;
++              }
+       }
+ 
+       /* This gives the user a way to test for new features in future by
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 421642af8d06..5d080516d0c5 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -438,17 +438,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+                       break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+-              {
+-                      static bool warned;
+-
+-                      if (!warned) {
+-                              warned = true;
+-                              netdev_warn(dev,
+-                                          "host using disabled UFO feature; please fix it\n");
+-                      }
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+                       break;
+-              }
+               case VIRTIO_NET_HDR_GSO_TCPV6:
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+                       break;
+@@ -763,6 +754,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++              else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
++                      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+               else
+                       BUG();
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -1579,7 +1572,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+                       dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+-                      dev->hw_features |= NETIF_F_TSO
++                      dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+                               | NETIF_F_TSO_ECN | NETIF_F_TSO6;
+               }
+               /* Individual feature bits: what can host handle? */
+@@ -1589,9 +1582,11 @@ static int virtnet_probe(struct virtio_device *vdev)
+                       dev->hw_features |= NETIF_F_TSO6;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+                       dev->hw_features |= NETIF_F_TSO_ECN;
++              if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
++                      dev->hw_features |= NETIF_F_UFO;
+ 
+               if (gso)
+-                      dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
++                      dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+               /* (!csum && gso) case will be fixed by register_netdev() */
+       }
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+@@ -1626,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+       /* If we can receive ANY GSO packets, we must allocate large ones. */
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+-          virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
++          virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
++          virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+               vi->big_packets = true;
+ 
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1812,9 +1808,9 @@ static struct virtio_device_id id_table[] = {
+ static unsigned int features[] = {
+       VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+       VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+-      VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
++      VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+-      VIRTIO_NET_F_GUEST_ECN,
++      VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+       VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+       VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+       VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index fbad00a5abc8..a664ec1f3b93 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1693,8 +1693,9 @@ done:
+                       is_scanning_required = 1;
+               } else {
+                       dev_dbg(priv->adapter->dev,
+-                              "info: trying to associate to '%s' bssid %pM\n",
+-                              (char *) req_ssid.ssid, bss->bssid);
++                              "info: trying to associate to '%.*s' bssid %pM\n",
++                              req_ssid.ssid_len, (char *)req_ssid.ssid,
++                              bss->bssid);
+                       memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+                       break;
+               }
+@@ -1735,8 +1736,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+               return -EINVAL;
+       }
+ 
+-      wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
+-                (char *) sme->ssid, sme->bssid);
++      wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n",
++                (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+ 
+       ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
+                                    priv->bss_mode, sme->channel, sme, 0);
+@@ -1859,8 +1860,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+               goto done;
+       }
+ 
+-      wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
+-                (char *) params->ssid, params->bssid);
++      wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n",
++                params->ssid_len, (char *)params->ssid, params->bssid);
+ 
+       mwifiex_set_ibss_params(priv, params);
+ 
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 6bc9b12ba42a..da26bc899d5f 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -362,7 +362,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+                              void *arg)
+ {
+-      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++      struct pci_dev *tdev = pci_get_slot(dev->bus,
++                                          PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       ssize_t ret;
+ 
+       if (!tdev)
+@@ -376,7 +377,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+                               const void *arg)
+ {
+-      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++      struct pci_dev *tdev = pci_get_slot(dev->bus,
++                                          PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       ssize_t ret;
+ 
+       if (!tdev)
+@@ -393,22 +395,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
+       .release = pci_vpd_pci22_release,
+ };
+ 
+-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+-{
+-      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+-      int ret = 0;
+-
+-      if (!tdev)
+-              return -ENODEV;
+-      if (!tdev->vpd || !tdev->multifunction ||
+-          dev->class != tdev->class || dev->vendor != tdev->vendor ||
+-          dev->device != tdev->device)
+-              ret = -ENODEV;
+-
+-      pci_dev_put(tdev);
+-      return ret;
+-}
+-
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+       struct pci_vpd_pci22 *vpd;
+@@ -417,12 +403,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+       cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+       if (!cap)
+               return -ENODEV;
+-      if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+-              int ret = pci_vpd_f0_dev_check(dev);
+ 
+-              if (ret)
+-                      return ret;
+-      }
+       vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+       if (!vpd)
+               return -ENOMEM;
+diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
+index 587e7e853107..3b507a512487 100644
+--- a/drivers/pci/pcie/aer/aer_inject.c
++++ b/drivers/pci/pcie/aer/aer_inject.c
+@@ -283,20 +283,6 @@ out:
+       return 0;
+ }
+ 
+-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+-{
+-      while (1) {
+-              if (!pci_is_pcie(dev))
+-                      break;
+-              if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+-                      return dev;
+-              if (!dev->bus->self)
+-                      break;
+-              dev = dev->bus->self;
+-      }
+-      return NULL;
+-}
+-
+ static int find_aer_device_iter(struct device *device, void *data)
+ {
+       struct pcie_device **result = data;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cb245bd510a2..b1f5f51b0196 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1894,11 +1894,27 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+                        PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++/*
++ * Quirk non-zero PCI functions to route VPD access through function 0 for
++ * devices that share VPD resources between functions.  The functions are
++ * expected to be identical devices.
++ */
+ static void quirk_f0_vpd_link(struct pci_dev *dev)
+ {
+-      if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++      struct pci_dev *f0;
++
++      if (!PCI_FUNC(dev->devfn))
+               return;
+-      dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++      f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
++      if (!f0)
++              return;
++
++      if (f0->vpd && dev->class == f0->class &&
++          dev->vendor == f0->vendor && dev->device == f0->device)
++              dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++      pci_dev_put(f0);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+                             PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 809b5ab9074c..60d74a666c62 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -364,6 +364,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+               if (test_bit(PWMF_EXPORTED, &pwm->flags))
+                       pwm_unexport_child(parent, pwm);
+       }
++
++      put_device(parent);
+ }
+ 
+ static int __init pwm_sysfs_init(void)
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index a961fe11b527..ae1db5499ca6 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3516,6 +3516,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+           SAM_STAT_CHECK_CONDITION;
+ }
+ 
++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
++{
++      return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
++}
+ 
+ /**
+  * _scsih_qcmd_lck - main scsi request entry point
+@@ -3544,6 +3548,13 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+               scsi_print_command(scmd);
+ #endif
+ 
++      /*
++       * Lock the device for any subsequent command until command is
++       * done.
++       */
++      if (ata_12_16_cmd(scmd))
++              scsi_internal_device_block(scmd->device);
++
+       scmd->scsi_done = done;
+       sas_device_priv_data = scmd->device->hostdata;
+       if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+@@ -4047,6 +4058,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+       if (scmd == NULL)
+               return 1;
+ 
++      if (ata_12_16_cmd(scmd))
++              scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
++
+       mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ 
+       if (mpi_reply == NULL) {
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index d93ceeabed27..02f14e3ed220 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -192,7 +192,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+ 
+-      tty_audit_add_data(tty, to, n, ldata->icanon);
++      tty_audit_add_data(tty, from, n, ldata->icanon);
+       return copy_to_user(to, from, n);
+ }
+ 
+diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
+index a4fdce74f883..3a5ddc1bf1fa 100644
+--- a/drivers/tty/tty_audit.c
++++ b/drivers/tty/tty_audit.c
+@@ -264,7 +264,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
+  *
+  *    Audit @data of @size from @tty, if necessary.
+  */
+-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++void tty_audit_add_data(struct tty_struct *tty, const void *data,
+                       size_t size, unsigned icanon)
+ {
+       struct tty_audit_buf *buf;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 7e8dceb4c634..f7ef78abce4b 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -491,6 +491,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+               return -ENOMEM;
+       }
+ 
++      spin_lock_init(&ci->lock);
+       ci->dev = dev;
+       ci->platdata = dev->platform_data;
+       if (ci->platdata->phy)
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 455e4e6b9926..837b39975bc2 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1777,8 +1777,6 @@ static int udc_start(struct ci_hdrc *ci)
+       struct device *dev = ci->dev;
+       int retval = 0;
+ 
+-      spin_lock_init(&ci->lock);
+-
+       ci->gadget.ops          = &usb_gadget_ops;
+       ci->gadget.speed        = USB_SPEED_UNKNOWN;
+       ci->gadget.max_speed    = USB_SPEED_HIGH;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 188e50446514..8b3e77716c4a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
++      { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
+       { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
+       { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 62ec56e379a0..d1b76b0a67df 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1026,6 +1026,8 @@ static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+       { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+       { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
++      { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
++              .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { }                                     /* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index db1a9b3a5f38..7b2f2056b7ef 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -596,6 +596,12 @@
+ #define STK541_PID            0x2109 /* Zigbee Controller */
+ 
+ /*
++ * Texas Instruments
++ */
++#define TI_VID                        0x0451
+#define TI_CC3200_LAUNCHPAD_PID       0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
++
++/*
+  * Blackfin gnICE JTAG
+  * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+  */
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index b1d815eb6d0b..8988b268a69a 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 
+       /* COMMAND STAGE */
+       /* let's send the command via the control pipe */
++      /*
++       * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
++       * Stack may be vmallocated.  So no DMA for us.  Make a copy.
++       */
++      memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
+       result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
+                                     US_CBI_ADSC, 
+                                     USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 
+-                                    us->ifnum, srb->cmnd, srb->cmd_len);
++                                    us->ifnum, us->iobuf, srb->cmd_len);
+ 
+       /* check the return code for the command */
+       usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 81562314df8c..00bc7bb96072 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1787,6 +1787,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
+       return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+ }
+ 
++static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
++{
++      while (1) {
++              if (!pci_is_pcie(dev))
++                      break;
++              if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
++                      return dev;
++              if (!dev->bus->self)
++                      break;
++              dev = dev->bus->self;
++      }
++      return NULL;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 96c23247a332..31cf8965ace8 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -578,7 +578,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+ 
+ /* tty_audit.c */
+ #ifdef CONFIG_AUDIT
+-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
+                              size_t size, unsigned icanon);
+ extern void tty_audit_exit(void);
+ extern void tty_audit_fork(struct signal_struct *sig);
+@@ -586,8 +586,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
+ extern void tty_audit_push(struct tty_struct *tty);
+ extern int tty_audit_push_current(void);
+ #else
+-static inline void tty_audit_add_data(struct tty_struct *tty,
+-              unsigned char *data, size_t size, unsigned icanon)
++static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
++                                    size_t size, unsigned icanon)
+ {
+ }
+ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+diff --git a/kernel/panic.c b/kernel/panic.c
+index de5924c75b1b..639255d5e5e1 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -138,8 +138,11 @@ void panic(const char *fmt, ...)
+        * We may have ended up stopping the CPU holding the lock (in
+        * smp_send_stop()) while still having some valuable data in the console
+        * buffer.  Try to acquire the lock then release it regardless of the
+-       * result.  The release will also print the buffers out.
++       * result.  The release will also print the buffers out.  Locks debug
++       * should be disabled to avoid reporting bad unlock balance when
++       * panic() is not being callled from OOPS.
+        */
++      debug_locks_off();
+       console_flush_on_panic();
+ 
+       if (!panic_blink)
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 511e6b47c594..a55fec567108 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -2274,6 +2274,7 @@ static int rcu_nocb_kthread(void *arg)
+                               cl++;
+                       c++;
+                       local_bh_enable();
++                      cond_resched();
+                       list = next;
+               }
+               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
+index 5464c8744ea9..e24388a863a7 100644
+--- a/lib/mpi/mpi-pow.c
++++ b/lib/mpi/mpi-pow.c
+@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+       if (!esize) {
+               /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+                * depending on if MOD equals 1.  */
+-              rp[0] = 1;
+               res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
++              if (res->nlimbs) {
++                      if (mpi_resize(res, 1) < 0)
++                              goto enomem;
++                      rp = res->d;
++                      rp[0] = 1;
++              }
+               res->sign = 0;
+               goto leave;
+       }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 73c6093e136a..7fa427ed41bc 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -728,7 +728,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+               val = min_t(u32, val, sysctl_wmem_max);
+ set_sndbuf:
+               sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+-              sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
++              sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+               /* Wake up sending tasks if we upped the value. */
+               sk->sk_write_space(sk);
+               break;
+@@ -764,7 +764,7 @@ set_rcvbuf:
+                * returning the value we actually used in getsockopt
+                * is the most desirable behavior.
+                */
+-              sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
++              sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+               break;
+ 
+       case SO_RCVBUFFORCE:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 294c642fbebb..4332b7c25af0 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -742,6 +742,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
+ {
+       const struct dccp_hdr *dh;
+       unsigned int cscov;
++      u8 dccph_doff;
+ 
+       if (skb->pkt_type != PACKET_HOST)
+               return 1;
+@@ -763,18 +764,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
+       /*
+        * If P.Data Offset is too small for packet type, drop packet and return
+        */
+-      if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+-              DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
++      dccph_doff = dh->dccph_doff;
++      if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
++              DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
+               return 1;
+       }
+       /*
+        * If P.Data Offset is too too large for packet, drop packet and return
+        */
+-      if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
+-              DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
++      if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
++              DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
+               return 1;
+       }
+-
++      dh = dccp_hdr(skb);
+       /*
+        * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
+        * has short sequence numbers), drop packet and return
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index b4cdc79a7fc8..e6353e25cf03 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -97,6 +97,9 @@ int __ip_local_out(struct sk_buff *skb)
+ 
+       iph->tot_len = htons(skb->len);
+       ip_send_check(iph);
++
++      skb->protocol = htons(ETH_P_IP);
++
+       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+                      skb_dst(skb)->dev, dst_output);
+ }
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 716dff49d0b9..6de66893a488 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -655,6 +655,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+       if (len > 0xFFFF)
+               return -EMSGSIZE;
+ 
++      /* Must have at least a full ICMP header. */
++      if (len < icmph_len)
++              return -EINVAL;
++
+       /*
+        *      Check the flags.
+        */
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index cfdb663e0259..9a625b1ae10f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -950,12 +950,21 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+       struct ipv6_tel_txoption opt;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device *tdev;
++      bool use_cache = false;
+       int mtu;
+       unsigned int max_headroom = sizeof(struct ipv6hdr);
+       u8 proto;
+       int err = -1;
+ 
+-      if (!fl6->flowi6_mark)
++      if (!(t->parms.flags &
++                   (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
++              /* enable the cache only only if the routing decision does
++               * not depend on the current inner header value
++               */
++              use_cache = true;
++      }
++
++      if (use_cache)
+               dst = ip6_tnl_dst_check(t);
+       if (!dst) {
+               ndst = ip6_route_output(net, NULL, fl6);
+@@ -1013,7 +1022,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+               consume_skb(skb);
+               skb = new_skb;
+       }
+-      if (fl6->flowi6_mark) {
++      if (!use_cache) {
+               skb_dst_set(skb, dst);
+               ndst = NULL;
+       } else {
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 4bd870af05d6..ab40997a1c2a 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -117,6 +117,8 @@ int __ip6_local_out(struct sk_buff *skb)
+               len = 0;
+       ipv6_hdr(skb)->payload_len = htons(len);
+ 
++      skb->protocol = htons(ETH_P_IPV6);
++
+       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+                      skb_dst(skb)->dev, dst_output);
+ }
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 07f8b97f9ae9..81f317f841b4 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       int ret;
+       int chk_addr_ret;
+ 
+-      if (!sock_flag(sk, SOCK_ZAPPED))
+-              return -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_l2tpip))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET)
+@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       read_unlock_bh(&l2tp_ip_lock);
+ 
+       lock_sock(sk);
++      if (!sock_flag(sk, SOCK_ZAPPED))
++              goto out;
++
+       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
+               goto out;
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index eadfb3031ed2..7c1a288f0b20 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -266,8 +266,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       int addr_type;
+       int err;
+ 
+-      if (!sock_flag(sk, SOCK_ZAPPED))
+-              return -EINVAL;
+       if (addr->l2tp_family != AF_INET6)
+               return -EINVAL;
+       if (addr_len < sizeof(*addr))
+@@ -293,6 +291,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       lock_sock(sk);
+ 
+       err = -EINVAL;
++      if (!sock_flag(sk, SOCK_ZAPPED))
++              goto out_unlock;
++
+       if (sk->sk_state != TCP_CLOSE)
+               goto out_unlock;
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3f9804b2802a..40d82575adc1 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3115,19 +3115,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+               if (optlen != sizeof(val))
+                       return -EINVAL;
+-              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-                      return -EBUSY;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+               switch (val) {
+               case TPACKET_V1:
+               case TPACKET_V2:
+               case TPACKET_V3:
+-                      po->tp_version = val;
+-                      return 0;
++                      break;
+               default:
+                       return -EINVAL;
+               }
++              lock_sock(sk);
++              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++                      ret = -EBUSY;
++              } else {
++                      po->tp_version = val;
++                      ret = 0;
++              }
++              release_sock(sk);
++              return ret;
+       }
+       case PACKET_RESERVE:
+       {
+@@ -3584,6 +3590,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+       /* Added to avoid minimal code churn */
+       struct tpacket_req *req = &req_u->req;
+ 
++      lock_sock(sk);
+       /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+       if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+               WARN(1, "Tx-ring is not supported.\n");
+@@ -3665,7 +3672,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+                       goto out;
+       }
+ 
+-      lock_sock(sk);
+ 
+       /* Detach socket from network */
+       spin_lock(&po->bind_lock);
+@@ -3714,11 +3720,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               if (!tx_ring)
+                       prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+       }
+-      release_sock(sk);
+ 
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++      release_sock(sk);
+       return err;
+ }
+ 
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 7ed78c9e505c..3f385130e9c0 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -124,6 +124,17 @@ static int tcf_pedit_cleanup(struct tc_action *a, int bind)
+       return 0;
+ }
+ 
++static bool offset_valid(struct sk_buff *skb, int offset)
++{
++      if (offset > 0 && offset > skb->len)
++              return false;
++
++      if  (offset < 0 && -offset > skb_headroom(skb))
++              return false;
++
++      return true;
++}
++
+ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+                    struct tcf_result *res)
+ {
+@@ -150,6 +161,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+                       if (tkey->offmask) {
+                               char *d, _d;
+ 
++                              if (!offset_valid(skb, off + tkey->at)) {
++                                      pr_info("tc filter pedit 'at' offset %d out of bounds\n",
++                                              off + tkey->at);
++                                      goto bad;
++                              }
+                               d = skb_header_pointer(skb, off + tkey->at, 1,
+                                                      &_d);
+                               if (!d)
+@@ -162,10 +178,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+                                       " offset must be on 32 bit boundaries\n");
+                               goto bad;
+                       }
+-                      if (offset > 0 && offset > skb->len) {
+-                              pr_info("tc filter pedit"
+-                                      " offset %d can't exceed pkt length 
%d\n",
+-                                     offset, skb->len);
++
++                      if (!offset_valid(skb, off + offset)) {
++                              pr_info("tc filter pedit offset %d out of bounds\n",
++                                      offset);
+                               goto bad;
+                       }
+ 
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 3159e9c284c5..93917ffe1061 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -61,6 +61,7 @@ struct cfg80211_registered_device {
+       struct list_head bss_list;
+       struct rb_root bss_tree;
+       u32 bss_generation;
++      u32 bss_entries;
+       struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+       struct cfg80211_sched_scan_request *sched_scan_req;
+       unsigned long suspend_at;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index d4397eba5408..8e5f5a706c95 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -55,6 +55,19 @@
+  * also linked into the probe response struct.
+  */
+ 
++/*
++ * Limit the number of BSS entries stored in mac80211. Each one is
++ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
++ * If somebody wants to really attack this though, they'd likely
++ * use small beacons, and only one type of frame, limiting each of
++ * the entries to a much smaller size (in order to generate more
++ * entries in total, so overhead is bigger.)
++ */
++static int bss_entries_limit = 1000;
++module_param(bss_entries_limit, int, 0644);
++MODULE_PARM_DESC(bss_entries_limit,
++                 "limit to number of scan BSS entries (per wiphy, default 1000)");
++
+ #define IEEE80211_SCAN_RESULT_EXPIRE  (30 * HZ)
+ 
+ static void bss_free(struct cfg80211_internal_bss *bss)
+@@ -135,6 +148,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+ 
+       list_del_init(&bss->list);
+       rb_erase(&bss->rbn, &dev->bss_tree);
++      dev->bss_entries--;
++      WARN_ONCE((dev->bss_entries == 0) ^ list_empty(&dev->bss_list),
++                "rdev bss entries[%d]/list[empty:%d] corruption\n",
++                dev->bss_entries, list_empty(&dev->bss_list));
+       bss_ref_put(dev, bss);
+       return true;
+ }
+@@ -339,6 +356,40 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
+       __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
+ }
+ 
++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
++{
++      struct cfg80211_internal_bss *bss, *oldest = NULL;
++      bool ret;
++
++      lockdep_assert_held(&rdev->bss_lock);
++
++      list_for_each_entry(bss, &rdev->bss_list, list) {
++              if (atomic_read(&bss->hold))
++                      continue;
++
++              if (!list_empty(&bss->hidden_list) &&
++                  !bss->pub.hidden_beacon_bss)
++                      continue;
++
++              if (oldest && time_before(oldest->ts, bss->ts))
++                      continue;
++              oldest = bss;
++      }
++
++      if (WARN_ON(!oldest))
++              return false;
++
++      /*
++       * The callers make sure to increase rdev->bss_generation if anything
++       * gets removed (and a new entry added), so there's no need to also do
++       * it here.
++       */
++
++      ret = __cfg80211_unlink_bss(rdev, oldest);
++      WARN_ON(!ret);
++      return ret;
++}
++
+ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+ {
+       while (len > 2 && ies[0] != eid) {
+@@ -620,6 +671,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+       const u8 *ie;
+       int i, ssidlen;
+       u8 fold = 0;
++      u32 n_entries = 0;
+ 
+       ies = rcu_access_pointer(new->pub.beacon_ies);
+       if (WARN_ON(!ies))
+@@ -643,6 +695,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+       /* This is the bad part ... */
+ 
+       list_for_each_entry(bss, &dev->bss_list, list) {
++              /*
++               * we're iterating all the entries anyway, so take the
++               * opportunity to validate the list length accounting
++               */
++              n_entries++;
++
+               if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
+                       continue;
+               if (bss->pub.channel != new->pub.channel)
+@@ -674,6 +732,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+                                  new->pub.beacon_ies);
+       }
+ 
++      WARN_ONCE(n_entries != dev->bss_entries,
++                "rdev bss entries[%d]/list[len:%d] corruption\n",
++                dev->bss_entries, n_entries);
++
+       return true;
+ }
+ 
+@@ -819,7 +881,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
+                       }
+               }
+ 
++              if (dev->bss_entries >= bss_entries_limit &&
++                  !cfg80211_bss_expire_oldest(dev)) {
++                      kfree(new);
++                      goto drop;
++              }
++
+               list_add_tail(&new->list, &dev->bss_list);
++              dev->bss_entries++;
+               rb_insert_bss(dev, new);
+               found = new;
+       }
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index 26c607c971f5..0c23888b9816 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -629,8 +629,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+       /* released below */
+       cred = get_current_cred();
+       cxt = cred_cxt(cred);
+-      profile = aa_cred_profile(cred);
+-      previous_profile = cxt->previous;
++      profile = aa_get_newest_profile(aa_cred_profile(cred));
++      previous_profile = aa_get_newest_profile(cxt->previous);
+ 
+       if (unconfined(profile)) {
+               info = "unconfined";
+@@ -726,6 +726,8 @@ audit:
+ out:
+       aa_put_profile(hat);
+       kfree(name);
++      aa_put_profile(profile);
++      aa_put_profile(previous_profile);
+       put_cred(cred);
+ 
+       return error;
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index e1ef106c8a6f..066e91ce9de9 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1857,10 +1857,10 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
+       if (substream->timer_running)
+               snd_timer_interrupt(substream->timer, 1);
+  _end:
++      kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+       snd_pcm_stream_unlock_irqrestore(substream, flags);
+       if (runtime->transfer_ack_end)
+               runtime->transfer_ack_end(substream);
+-      kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ }
+ 
+ EXPORT_SYMBOL(snd_pcm_period_elapsed);
