commit:     84492c94956fe0e05c938eddff57d95bd313437f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 17 16:21:08 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 17 16:21:08 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84492c94

Linux patch 4.14.216

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |   4 +
 1215_linux-4.14.216.patch | 658 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 662 insertions(+)

diff --git a/0000_README b/0000_README
index 5f64f81..4987d24 100644
--- a/0000_README
+++ b/0000_README
@@ -903,6 +903,10 @@ Patch:  1214_linux-4.14.215.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.215
 
+Patch:  1215_linux-4.14.216.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.216
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1215_linux-4.14.216.patch b/1215_linux-4.14.216.patch
new file mode 100644
index 0000000..595f427
--- /dev/null
+++ b/1215_linux-4.14.216.patch
@@ -0,0 +1,658 @@
+diff --git a/Makefile b/Makefile
+index 10314ba3c7bc5..7537adc4e237d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 215
++SUBLEVEL = 216
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index acbede082b5b5..79a59fe0e773b 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -224,10 +224,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
+               break;
+       case BUS_NOTIFY_BIND_DRIVER:
+               od = to_omap_device(pdev);
+-              if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+-                  pm_runtime_status_suspended(dev)) {
++              if (od) {
+                       od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+-                      pm_runtime_set_active(dev);
++                      if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
++                          pm_runtime_status_suspended(dev)) {
++                              pm_runtime_set_active(dev);
++                      }
+               }
+               break;
+       case BUS_NOTIFY_ADD_DEVICE:
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index c553f9883194f..1a006101f527a 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -470,6 +470,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ {
+       u64 pmcr, val;
+ 
++      /* No PMU available, PMCR_EL0 may UNDEF... */
++      if (!kvm_arm_support_pmu_v3())
++              return;
++
+       pmcr = read_sysreg(pmcr_el0);
+       /*
+        * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 016579ef16d3d..ec98abca0df03 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -414,9 +414,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+       __asm__ __volatile__("\
+-              stw%U0%X0 %2,%0\n\
++              stw%X0 %2,%0\n\
+               eieio\n\
+-              stw%U0%X0 %L2,%1"
++              stw%X1 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+ 
+diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
+index 5c68f4a59f758..e9171b8242e4b 100644
+--- a/arch/powerpc/include/asm/nohash/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/pgtable.h
+@@ -157,9 +157,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+               flush_hash_entry(mm, ptep, addr);
+ #endif
+       __asm__ __volatile__("\
+-              stw%U0%X0 %2,%0\n\
++              stw%X0 %2,%0\n\
+               eieio\n\
+-              stw%U0%X0 %L2,%1"
++              stw%X1 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+ 
+diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+index bb33888151fb0..10d8f5cbb5107 100644
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -411,82 +411,68 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
+       kfree(rdtgrp);
+ }
+ 
+-struct task_move_callback {
+-      struct callback_head    work;
+-      struct rdtgroup         *rdtgrp;
+-};
+-
+-static void move_myself(struct callback_head *head)
++static void _update_task_closid_rmid(void *task)
+ {
+-      struct task_move_callback *callback;
+-      struct rdtgroup *rdtgrp;
+-
+-      callback = container_of(head, struct task_move_callback, work);
+-      rdtgrp = callback->rdtgrp;
+-
+       /*
+-       * If resource group was deleted before this task work callback
+-       * was invoked, then assign the task to root group and free the
+-       * resource group.
++       * If the task is still current on this CPU, update PQR_ASSOC MSR.
++       * Otherwise, the MSR is updated when the task is scheduled in.
+        */
+-      if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+-          (rdtgrp->flags & RDT_DELETED)) {
+-              current->closid = 0;
+-              current->rmid = 0;
+-              rdtgroup_remove(rdtgrp);
+-      }
+-
+-      preempt_disable();
+-      /* update PQR_ASSOC MSR to make resource group go into effect */
+-      intel_rdt_sched_in();
+-      preempt_enable();
++      if (task == current)
++              intel_rdt_sched_in();
++}
+ 
+-      kfree(callback);
++static void update_task_closid_rmid(struct task_struct *t)
++{
++      if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
++              smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
++      else
++              _update_task_closid_rmid(t);
+ }
+ 
+ static int __rdtgroup_move_task(struct task_struct *tsk,
+                               struct rdtgroup *rdtgrp)
+ {
+-      struct task_move_callback *callback;
+-      int ret;
+-
+-      callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+-      if (!callback)
+-              return -ENOMEM;
+-      callback->work.func = move_myself;
+-      callback->rdtgrp = rdtgrp;
++      /* If the task is already in rdtgrp, no need to move the task. */
++      if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
++           tsk->rmid == rdtgrp->mon.rmid) ||
++          (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
++           tsk->closid == rdtgrp->mon.parent->closid))
++              return 0;
+ 
+       /*
+-       * Take a refcount, so rdtgrp cannot be freed before the
+-       * callback has been invoked.
++       * Set the task's closid/rmid before the PQR_ASSOC MSR can be
++       * updated by them.
++       *
++       * For ctrl_mon groups, move both closid and rmid.
++       * For monitor groups, can move the tasks only from
++       * their parent CTRL group.
+        */
+-      atomic_inc(&rdtgrp->waitcount);
+-      ret = task_work_add(tsk, &callback->work, true);
+-      if (ret) {
+-              /*
+-               * Task is exiting. Drop the refcount and free the callback.
+-               * No need to check the refcount as the group cannot be
+-               * deleted before the write function unlocks rdtgroup_mutex.
+-               */
+-              atomic_dec(&rdtgrp->waitcount);
+-              kfree(callback);
+-      } else {
+-              /*
+-               * For ctrl_mon groups move both closid and rmid.
+-               * For monitor groups, can move the tasks only from
+-               * their parent CTRL group.
+-               */
+-              if (rdtgrp->type == RDTCTRL_GROUP) {
+-                      tsk->closid = rdtgrp->closid;
++
++      if (rdtgrp->type == RDTCTRL_GROUP) {
++              tsk->closid = rdtgrp->closid;
++              tsk->rmid = rdtgrp->mon.rmid;
++      } else if (rdtgrp->type == RDTMON_GROUP) {
++              if (rdtgrp->mon.parent->closid == tsk->closid)
+                       tsk->rmid = rdtgrp->mon.rmid;
+-              } else if (rdtgrp->type == RDTMON_GROUP) {
+-                      if (rdtgrp->mon.parent->closid == tsk->closid)
+-                              tsk->rmid = rdtgrp->mon.rmid;
+-                      else
+-                              ret = -EINVAL;
+-              }
++              else
++                      return -EINVAL;
+       }
+-      return ret;
++
++      /*
++       * Ensure the task's closid and rmid are written before determining if
++       * the task is current that will decide if it will be interrupted.
++       */
++      barrier();
++
++      /*
++       * By now, the task's closid and rmid are set. If the task is current
++       * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
++       * group go into effect. If the task is not current, the MSR will be
++       * updated when the task is scheduled in.
++       */
++      update_task_closid_rmid(tsk);
++
++      return 0;
+ }
+ 
+ static int rdtgroup_task_write_permission(struct task_struct *task,
+diff --git a/block/genhd.c b/block/genhd.c
+index 449ef56bba708..3958ae39daebf 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -208,14 +208,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
++              get_device(part_to_dev(part));
++              piter->part = part;
+               if (!part_nr_sects_read(part) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+-                    piter->idx == 0))
++                    piter->idx == 0)) {
++                      put_device(part_to_dev(part));
++                      piter->part = NULL;
+                       continue;
++              }
+ 
+-              get_device(part_to_dev(part));
+-              piter->part = part;
+               piter->idx += inc;
+               break;
+       }
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index 7b2df7a54d875..01091c08e9999 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -477,6 +477,7 @@ config BLK_DEV_RBD
+ config BLK_DEV_RSXX
+       tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
+       depends on PCI
++      select CRC32
+       help
+         Device driver for IBM's high speed PCIe SSD
+         storage device: Flash Adapter 900GB Full Height.
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index 062d71434e470..32bb00a6fe099 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
+ 
+ /* Take a frequency, and issue the fid/vid transition command */
+ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+-              unsigned int index)
++              unsigned int index,
++              struct cpufreq_policy *policy)
+ {
+-      struct cpufreq_policy *policy;
+       u32 fid = 0;
+       u32 vid = 0;
+       int res;
+@@ -921,9 +921,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+       freqs.old = find_khz_freq_from_fid(data->currfid);
+       freqs.new = find_khz_freq_from_fid(fid);
+ 
+-      policy = cpufreq_cpu_get(smp_processor_id());
+-      cpufreq_cpu_put(policy);
+-
+       cpufreq_freq_transition_begin(policy, &freqs);
+       res = transition_fid_vid(data, fid, vid);
+       cpufreq_freq_transition_end(policy, &freqs, res);
+@@ -978,7 +975,7 @@ static long powernowk8_target_fn(void *arg)
+ 
+       powernow_k8_acpi_pst_values(data, newstate);
+ 
+-      ret = transition_frequency_fidvid(data, newstate);
++      ret = transition_frequency_fidvid(data, newstate, pol);
+ 
+       if (ret) {
+               pr_err("transition frequency failed\n");
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 314f175cf8d05..21203e3a54fda 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2360,7 +2360,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               has_dre = false;
+ 
+       if (!has_dre)
+-              xdev->common.copy_align = fls(width - 1);
++              xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
+ 
+       if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+           of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+@@ -2637,7 +2637,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       }
+ 
+       /* Register the DMA engine with the core */
+-      dma_async_device_register(&xdev->common);
++      err = dma_async_device_register(&xdev->common);
++      if (err) {
++              dev_err(xdev->dev, "failed to register the dma device\n");
++              goto error;
++      }
+ 
+       err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+                                        xdev);
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index bf13299ebb558..d94fb7acfa556 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -341,7 +341,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
+               return true;
+ 
+       if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
+-          (vma->node.start + vma->node.size - 1) >> 32)
++          (vma->node.start + vma->node.size + 4095) >> 32)
+               return true;
+ 
+       if (flags & __EXEC_OBJECT_NEEDS_MAP &&
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 4053259bccb8d..1925c89381194 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -71,6 +71,8 @@
+ 
+ /* timeout (ms) for pm runtime autosuspend */
+ #define SPRD_I2C_PM_TIMEOUT   1000
++/* timeout (ms) for transfer message */
++#define I2C_XFER_TIMEOUT      1000
+ 
+ /* SPRD i2c data structure */
+ struct sprd_i2c {
+@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+                              struct i2c_msg *msg, bool is_last_msg)
+ {
+       struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
++      unsigned long time_left;
+ 
+       i2c_dev->msg = msg;
+       i2c_dev->buf = msg->buf;
+@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+ 
+       sprd_i2c_opt_start(i2c_dev);
+ 
+-      wait_for_completion(&i2c_dev->complete);
++      time_left = wait_for_completion_timeout(&i2c_dev->complete,
++                              msecs_to_jiffies(I2C_XFER_TIMEOUT));
++      if (!time_left)
++              return -ETIMEDOUT;
+ 
+       return i2c_dev->err;
+ }
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+index e2737dc71b677..0a2c2eace3a25 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -395,13 +395,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
+ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
+ {
+       struct st_lsm6dsx_hw *hw = private;
+-      int count;
++      int fifo_len = 0, len;
+ 
+-      mutex_lock(&hw->fifo_lock);
+-      count = st_lsm6dsx_read_fifo(hw);
+-      mutex_unlock(&hw->fifo_lock);
++      /*
++       * If we are using edge IRQs, new samples can arrive while
++       * processing current interrupt since there are no hw
++       * guarantees the irq line stays "low" long enough to properly
++       * detect the new interrupt. In this case the new sample will
++       * be missed.
++       * Polling FIFO status register allow us to read new
++       * samples even if the interrupt arrives while processing
++       * previous data and the timeslot where the line is "low" is
++       * too short to be properly detected.
++       */
++      do {
++              mutex_lock(&hw->fifo_lock);
++              len = st_lsm6dsx_read_fifo(hw);
++              mutex_unlock(&hw->fifo_lock);
++
++              if (len > 0)
++                      fifo_len += len;
++      } while (len > 0);
+ 
+-      return !count ? IRQ_NONE : IRQ_HANDLED;
++      return fifo_len ? IRQ_HANDLED : IRQ_NONE;
+ }
+ 
+ static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 09c6b17aaf80e..25e85fdfb9d49 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1367,6 +1367,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               irq_cfg = irqd_cfg(irq_data);
+               if (!irq_data || !irq_cfg) {
++                      if (!i)
++                              kfree(data);
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 4837045ffba37..a53d0c16668a7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -961,6 +961,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               kfree(ft->g);
++              ft->g = NULL;
+               return -ENOMEM;
+       }
+ 
+@@ -1181,6 +1182,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               kfree(ft->g);
++              ft->g = NULL;
+               return -ENOMEM;
+       }
+ 
+@@ -1226,6 +1228,7 @@ err_destroy_groups:
+       ft->g[ft->num_groups] = NULL;
+       mlx5e_destroy_groups(ft);
+       kvfree(in);
++      kfree(ft->g);
+ 
+       return err;
+ }
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 97c4c301166c5..eef1412c058d3 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1128,7 +1128,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+        * accordingly. Otherwise, we should check here.
+        */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-              delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
++              delayed_ndp_size = ctx->max_ndp_size +
++                      max_t(u32,
++                            ctx->tx_ndp_modulus,
++                            ctx->tx_modulus + ctx->tx_remainder) - 1;
+       else
+               delayed_ndp_size = 0;
+ 
+@@ -1309,7 +1312,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+       if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+           skb_out->len > ctx->min_tx_pkt) {
+               padding_count = ctx->tx_curr_size - skb_out->len;
+-              skb_put_zero(skb_out, padding_count);
++              if (!WARN_ON(padding_count > ctx->tx_curr_size))
++                      skb_put_zero(skb_out, padding_count);
+       } else if (skb_out->len < ctx->tx_curr_size &&
+                  (skb_out->len % dev->maxpacket) == 0) {
+               skb_put_u8(skb_out, 0); /* force short packet */
+diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
+index 4e9fe75d70675..069f933b0add2 100644
+--- a/drivers/net/wan/Kconfig
++++ b/drivers/net/wan/Kconfig
+@@ -295,6 +295,7 @@ config SLIC_DS26522
+       tristate "Slic Maxim ds26522 card support"
+       depends on SPI
+       depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
++      select BITREVERSE
+       help
+         This module initializes and configures the slic maxim card
+         in T1 or E1 mode.
+diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
+index b448926b0c0ff..27cef55d17a75 100644
+--- a/drivers/net/wireless/ath/wil6210/Kconfig
++++ b/drivers/net/wireless/ath/wil6210/Kconfig
+@@ -1,6 +1,7 @@
+ config WIL6210
+       tristate "Wilocity 60g WiFi card wil6210 support"
+       select WANT_DEV_COREDUMP
++      select CRC32
+       depends on CFG80211
+       depends on PCI
+       default n
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 1579eb2bc29f7..06eb7d259b7f5 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1660,7 +1660,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      master = spi_alloc_master(dev, sizeof(struct driver_data));
++      master = devm_spi_alloc_master(dev, sizeof(*drv_data));
+       if (!master) {
+               dev_err(&pdev->dev, "cannot alloc spi_master\n");
+               pxa_ssp_free(ssp);
+@@ -1841,7 +1841,6 @@ out_error_clock_enabled:
+       free_irq(ssp->irq, drv_data);
+ 
+ out_error_master_alloc:
+-      spi_master_put(master);
+       pxa_ssp_free(ssp);
+       return status;
+ }
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 179749f354c33..d919803540510 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -299,9 +299,9 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
+ 
+       /* align packet size with data registers access */
+       if (spi->cur_bpw > 8)
+-              fthlv -= (fthlv % 2); /* multiple of 2 */
++              fthlv += (fthlv % 2) ? 1 : 0;
+       else
+-              fthlv -= (fthlv % 4); /* multiple of 4 */
++              fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
+ 
+       return fthlv;
+ }
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 135e95950f513..5b4d4b1087c4d 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
+ {
+       uint32_t crc;
+ 
+-      ubifs_assert(pad >= 0 && !(pad & 7));
++      ubifs_assert(pad >= 0);
+ 
+       if (pad >= UBIFS_PAD_NODE_SZ) {
+               struct ubifs_ch *ch = buf;
+@@ -727,6 +727,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
+                * write-buffer.
+                */
+               memcpy(wbuf->buf + wbuf->used, buf, len);
++              if (aligned_len > len) {
++                      ubifs_assert(aligned_len - len < 8);
++                      ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
++              }
+ 
+               if (aligned_len == wbuf->avail) {
+                       dbg_io("flush jhead %s wbuf to LEB %d:%d",
+@@ -819,13 +823,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
+       }
+ 
+       spin_lock(&wbuf->lock);
+-      if (aligned_len)
++      if (aligned_len) {
+               /*
+                * And now we have what's left and what does not take whole
+                * max. write unit, so write it to the write-buffer and we are
+                * done.
+                */
+               memcpy(wbuf->buf, buf + written, len);
++              if (aligned_len > len) {
++                      ubifs_assert(aligned_len - len < 8);
++                      ubifs_pad(c, wbuf->buf + len, aligned_len - len);
++              }
++      }
+ 
+       if (c->leb_size - wbuf->offs >= c->max_write_size)
+               wbuf->size = c->max_write_size;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 48e618b20d34b..404a19923ea76 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -459,7 +459,10 @@
+  */
+ #define TEXT_TEXT                                                     \
+               ALIGN_FUNCTION();                                       \
+-              *(.text.hot TEXT_MAIN .text.fixup .text.unlikely)       \
++              *(.text.hot .text.hot.*)                                \
++              *(TEXT_MAIN .text.fixup)                                \
++              *(.text.unlikely .text.unlikely.*)                      \
++              *(.text.unknown .text.unknown.*)                        \
+               *(.text..refcount)                                      \
+               *(.ref.text)                                            \
+       MEM_KEEP(init.text)                                             \
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index cf82d970b0e48..0efdf83f78e7a 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -272,7 +272,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+       return 0;
+ 
+ out_free_newdev:
+-      if (new_dev->reg_state == NETREG_UNINITIALIZED)
++      if (new_dev->reg_state == NETREG_UNINITIALIZED ||
++          new_dev->reg_state == NETREG_UNREGISTERED)
+               free_netdev(new_dev);
+       return err;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c4f412526dfeb..c062d340cd409 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1850,6 +1850,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+               skb->csum = csum_block_sub(skb->csum,
+                                          skb_checksum(skb, len, delta, 0),
+                                          len);
++      } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++              int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
++              int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
++
++              if (offset + sizeof(__sum16) > hdlen)
++                      return -EINVAL;
+       }
+       return __pskb_trim(skb, len);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 819d51101cbd9..5ec185a9dcaba 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -312,7 +312,7 @@ static int ip_finish_output(struct net *net, struct sock 
*sk, struct sk_buff *sk
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(net, sk, skb, mtu);
+ 
+-      if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
++      if (skb->len > mtu || IPCB(skb)->frag_max_size)
+               return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
+ 
+       return ip_finish_output2(net, sk, skb);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 44cc17c43a6b5..e9cf0d1854595 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -752,7 +752,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+               goto tx_error;
+       }
+ 
+-      if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
++      df = tnl_params->frag_off;
++      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
++              df |= (inner_iph->frag_off & htons(IP_DF));
++
++      if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) {
+               ip_rt_put(rt);
+               goto tx_error;
+       }
+@@ -780,10 +784,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                       ttl = ip4_dst_hoplimit(&rt->dst);
+       }
+ 
+-      df = tnl_params->frag_off;
+-      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+-              df |= (inner_iph->frag_off&htons(IP_DF));
+-
+       max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+                       + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+       if (max_headroom > dev->needed_headroom)

Reply via email to