commit:     7da405c97751536cd52f43a2352274ade231a87d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec 17 11:40:32 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec 17 11:40:32 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7da405c9

proj/linux-patches: Linux patch 4.14.89

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1088_linux-4.14.89.patch | 3343 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3347 insertions(+)

diff --git a/0000_README b/0000_README
index cd0b9dc..1e80ac5 100644
--- a/0000_README
+++ b/0000_README
@@ -395,6 +395,10 @@ Patch:  1087_4.14.88.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.88
 
+Patch:  1088_4.14.89.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.89
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1088_linux-4.14.89.patch b/1088_linux-4.14.89.patch
new file mode 100644
index 0000000..ecfddf0
--- /dev/null
+++ b/1088_linux-4.14.89.patch
@@ -0,0 +1,3343 @@
+diff --git a/Makefile b/Makefile
+index 3fdee40861a1..b83477be8d0c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 88
++SUBLEVEL = 89
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+index c335b923753a..a7883676f675 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -123,7 +123,7 @@
+ };
+ 
+ &mmc3 {
+-      interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
++      interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
+       pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
+       pinctrl-names = "default";
+       vmmc-supply = <&wl12xx_vmmc>;
+diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
+index b1a26b42d190..a8e4b89097d9 100644
+--- a/arch/arm/boot/dts/sama5d2.dtsi
++++ b/arch/arm/boot/dts/sama5d2.dtsi
+@@ -308,7 +308,7 @@
+                                 0x1 0x0 0x60000000 0x10000000
+                                 0x2 0x0 0x70000000 0x10000000
+                                 0x3 0x0 0x80000000 0x10000000>;
+-                      clocks = <&mck>;
++                      clocks = <&h32ck>;
+                       status = "disabled";
+ 
+                       nand_controller: nand-controller {
+diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
+index 6cbc69c92913..4174fa86bfb1 100644
+--- a/arch/arm/mach-omap1/board-ams-delta.c
++++ b/arch/arm/mach-omap1/board-ams-delta.c
+@@ -512,6 +512,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
+       struct modem_private_data *priv = port->private_data;
+       int ret;
+ 
++      if (!priv)
++              return;
++
+       if (IS_ERR(priv->regulator))
+               return;
+ 
+diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
+index 1c0c1663f078..5affa9f5300b 100644
+--- a/arch/arm/mach-omap2/prm44xx.c
++++ b/arch/arm/mach-omap2/prm44xx.c
+@@ -344,7 +344,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
+  * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
+  * omap44xx_prm_reconfigure_io_chain() must be called.  No return value.
+  */
+-static void __init omap44xx_prm_enable_io_wakeup(void)
++static void omap44xx_prm_enable_io_wakeup(void)
+ {
+       s32 inst = omap4_prmst_get_prm_dev_inst();
+ 
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 61e91fee8467..edf6a61f0a64 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -349,6 +349,8 @@ static int __hw_perf_event_init(struct perf_event *event)
+               break;
+ 
+       case PERF_TYPE_HARDWARE:
++              if (is_sampling_event(event))   /* No sampling support */
++                      return -ENOENT;
+               ev = attr->config;
+               /* Count user space (problem-state) only */
+               if (!attr->exclude_user && attr->exclude_kernel) {
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 13dfb55b84db..f7c34184342a 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -55,7 +55,7 @@
+ #define PRIo64 "o"
+ 
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+ 
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+ #define APIC_VERSION                  (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ec588cf4fe95..4353580b659a 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1089,7 +1089,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+ static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
+                                           u16 error_code);
+ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
++static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+                                                         u32 msr, int type);
+ 
+ static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+@@ -5227,7 +5227,7 @@ static void free_vpid(int vpid)
+       spin_unlock(&vmx_vpid_lock);
+ }
+ 
+-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
++static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+                                                         u32 msr, int type)
+ {
+       int f = sizeof(unsigned long);
+@@ -5262,7 +5262,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
+       }
+ }
+ 
+-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
++static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+                                                        u32 msr, int type)
+ {
+       int f = sizeof(unsigned long);
+@@ -5297,7 +5297,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
+       }
+ }
+ 
+-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
++static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+                                                     u32 msr, int type, bool value)
+ {
+       if (value)
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index df208af3cd74..515d5e4414c2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -7,7 +7,6 @@
+ 
+ #include <xen/features.h>
+ #include <xen/page.h>
+-#include <xen/interface/memory.h>
+ 
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/hypervisor.h>
+@@ -336,80 +335,3 @@ void xen_arch_unregister_cpu(int num)
+ }
+ EXPORT_SYMBOL(xen_arch_unregister_cpu);
+ #endif
+-
+-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
+-{
+-      struct xen_memory_map memmap;
+-      int rc;
+-      unsigned int i, last_guest_ram;
+-      phys_addr_t max_addr = PFN_PHYS(max_pfn);
+-      struct e820_table *xen_e820_table;
+-      const struct e820_entry *entry;
+-      struct resource *res;
+-
+-      if (!xen_initial_domain())
+-              return;
+-
+-      xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
+-      if (!xen_e820_table)
+-              return;
+-
+-      memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
+-      set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
+-      rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
+-      if (rc) {
+-              pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
+-              goto out;
+-      }
+-
+-      last_guest_ram = 0;
+-      for (i = 0; i < memmap.nr_entries; i++) {
+-              if (xen_e820_table->entries[i].addr >= max_addr)
+-                      break;
+-              if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
+-                      last_guest_ram = i;
+-      }
+-
+-      entry = &xen_e820_table->entries[last_guest_ram];
+-      if (max_addr >= entry->addr + entry->size)
+-              goto out; /* No unallocated host RAM. */
+-
+-      hostmem_resource->start = max_addr;
+-      hostmem_resource->end = entry->addr + entry->size;
+-
+-      /*
+-       * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
+-       * as unavailable. The rest of that region can be used for hotplug-based
+-       * ballooning.
+-       */
+-      for (; i < memmap.nr_entries; i++) {
+-              entry = &xen_e820_table->entries[i];
+-
+-              if (entry->type == E820_TYPE_RAM)
+-                      continue;
+-
+-              if (entry->addr >= hostmem_resource->end)
+-                      break;
+-
+-              res = kzalloc(sizeof(*res), GFP_KERNEL);
+-              if (!res)
+-                      goto out;
+-
+-              res->name = "Unavailable host RAM";
+-              res->start = entry->addr;
+-              res->end = (entry->addr + entry->size < hostmem_resource->end) ?
+-                          entry->addr + entry->size : hostmem_resource->end;
+-              rc = insert_resource(hostmem_resource, res);
+-              if (rc) {
+-                      pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
+-                              __func__, res->start, res->end, rc);
+-                      kfree(res);
+-                      goto  out;
+-              }
+-      }
+-
+- out:
+-      kfree(xen_e820_table);
+-}
+-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 6e0d2086eacb..c114ca767b3b 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
+       addr = xen_e820_table.entries[0].addr;
+       size = xen_e820_table.entries[0].size;
+       while (i < xen_e820_table.nr_entries) {
++              bool discard = false;
+ 
+               chunk_size = size;
+               type = xen_e820_table.entries[i].type;
+@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
+                               xen_add_extra_mem(pfn_s, n_pfns);
+                               xen_max_p2m_pfn = pfn_s + n_pfns;
+                       } else
+-                              type = E820_TYPE_UNUSABLE;
++                              discard = true;
+               }
+ 
+-              xen_align_and_add_e820_region(addr, chunk_size, type);
++              if (!discard)
++                      xen_align_and_add_e820_region(addr, chunk_size, type);
+ 
+               addr += chunk_size;
+               size -= chunk_size;
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index de56394dd161..ca414910710e 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -547,7 +547,7 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
+  */
+ static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
+ {
+-      struct acpi_iort_node *node, *msi_parent;
++      struct acpi_iort_node *node, *msi_parent = NULL;
+       struct fwnode_handle *iort_fwnode;
+       struct acpi_iort_its_group *its;
+       int i;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 3981915e2311..b2eecfc9042e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1992,12 +1992,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
+ #endif
+ 
+       WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
++      udelay(50);
+ 
+       /* carrizo do enable cp interrupt after cp inited */
+-      if (!(adev->flags & AMD_IS_APU))
++      if (!(adev->flags & AMD_IS_APU)) {
+               gfx_v9_0_enable_gui_idle_interrupt(adev, true);
+-
+-      udelay(50);
++              udelay(50);
++      }
+ 
+ #ifdef AMDGPU_RLC_DEBUG_RETRY
+       /* RLC_GPM_GENERAL_6 : RLC Ucode version */
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index fae1176b2472..343867b182dd 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
+ {
+       struct ast_i2c_chan *i2c = i2c_priv;
+       struct ast_private *ast = i2c->dev->dev_private;
+-      uint32_t val;
++      uint32_t val, val2, count, pass;
++
++      count = 0;
++      pass = 0;
++      val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
++      do {
++              val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
++              if (val == val2) {
++                      pass++;
++              } else {
++                      pass = 0;
++                      val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
++              }
++      } while ((pass < 5) && (count++ < 0x10000));
+ 
+-      val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+       return val & 1 ? 1 : 0;
+ }
+ 
+@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
+ {
+       struct ast_i2c_chan *i2c = i2c_priv;
+       struct ast_private *ast = i2c->dev->dev_private;
+-      uint32_t val;
++      uint32_t val, val2, count, pass;
++
++      count = 0;
++      pass = 0;
++      val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
++      do {
++              val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
++              if (val == val2) {
++                      pass++;
++              } else {
++                      pass = 0;
++                      val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
++              }
++      } while ((pass < 5) && (count++ < 0x10000));
+ 
+-      val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+       return val & 1 ? 1 : 0;
+ }
+ 
+@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
+ 
+       for (i = 0; i < 0x10000; i++) {
+               ujcrb7 = ((clock & 0x01) ? 0 : 1);
+-              ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
++              ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
+              jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+               if (ujcrb7 == jtemp)
+                       break;
+@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
+ 
+       for (i = 0; i < 0x10000; i++) {
+               ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+-              ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
++              ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
+              jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+               if (ujcrb7 == jtemp)
+                       break;
+diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
+index 9509017dbded..d5dfe7045cc6 100644
+--- a/drivers/gpu/drm/meson/meson_venc.c
++++ b/drivers/gpu/drm/meson/meson_venc.c
+@@ -714,6 +714,7 @@ struct meson_hdmi_venc_vic_mode {
+       { 5, &meson_hdmi_encp_mode_1080i60 },
+       { 20, &meson_hdmi_encp_mode_1080i50 },
+       { 32, &meson_hdmi_encp_mode_1080p24 },
++      { 33, &meson_hdmi_encp_mode_1080p50 },
+       { 34, &meson_hdmi_encp_mode_1080p30 },
+       { 31, &meson_hdmi_encp_mode_1080p50 },
+       { 16, &meson_hdmi_encp_mode_1080p60 },
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
+index 0bcf041368c7..574126b649e9 100644
+--- a/drivers/hid/hid-sensor-custom.c
++++ b/drivers/hid/hid-sensor-custom.c
+@@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
+                                               sensor_inst->hsdev,
+                                               sensor_inst->hsdev->usage,
+                                               usage, report_id,
+-                                              SENSOR_HUB_SYNC);
++                                              SENSOR_HUB_SYNC, false);
+       } else if (!strncmp(name, "units", strlen("units")))
+               value = sensor_inst->fields[field_index].attribute.units;
+       else if (!strncmp(name, "unit-expo", strlen("unit-expo")))
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index faba542d1b07..b5bd5cb7d532 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature);
+ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+                                       u32 usage_id,
+                                       u32 attr_usage_id, u32 report_id,
+-                                      enum sensor_hub_read_flags flag)
++                                      enum sensor_hub_read_flags flag,
++                                      bool is_signed)
+ {
+       struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+       unsigned long flags;
+@@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+                                               &hsdev->pending.ready, HZ*5);
+               switch (hsdev->pending.raw_size) {
+               case 1:
+-                      ret_val = *(u8 *)hsdev->pending.raw_data;
++                      if (is_signed)
++                              ret_val = *(s8 *)hsdev->pending.raw_data;
++                      else
++                              ret_val = *(u8 *)hsdev->pending.raw_data;
+                       break;
+               case 2:
+-                      ret_val = *(u16 *)hsdev->pending.raw_data;
++                      if (is_signed)
++                              ret_val = *(s16 *)hsdev->pending.raw_data;
++                      else
++                              ret_val = *(u16 *)hsdev->pending.raw_data;
+                       break;
+               case 4:
+                       ret_val = *(u32 *)hsdev->pending.raw_data;
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index 71d3445ba869..07ee19573b3f 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
+               break;
+       case INA2XX_CURRENT:
+               /* signed register, result in mA */
+-              val = regval * data->current_lsb_uA;
++              val = (s16)regval * data->current_lsb_uA;
+               val = DIV_ROUND_CLOSEST(val, 1000);
+               break;
+       case INA2XX_CALIBRATION:
+@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
+       }
+ 
+       data->groups[group++] = &ina2xx_group;
+-      if (id->driver_data == ina226)
++      if (chip == ina226)
+               data->groups[group++] = &ina226_group;
+ 
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
+               return PTR_ERR(hwmon_dev);
+ 
+       dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
+-               id->name, data->rshunt);
++               client->name, data->rshunt);
+ 
+       return 0;
+ }
+diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
+index 49276bbdac3d..1bb80f992aa8 100644
+--- a/drivers/hwmon/w83795.c
++++ b/drivers/hwmon/w83795.c
+@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
+  * somewhere else in the code
+  */
+ #define SENSOR_ATTR_TEMP(index) {                                     \
+-      SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
++      SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
+               show_temp_mode, store_temp_mode, NOT_USED, index - 1),  \
+       SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp,          \
+               NULL, TEMP_READ, index - 1),                            \
+diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
+index 2238a26aba63..f573d9c61fc3 100644
+--- a/drivers/iio/accel/hid-sensor-accel-3d.c
++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
+@@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+       struct hid_sensor_hub_device *hsdev =
+                                       accel_state->common_attributes.hsdev;
+ 
+@@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
+       case 0:
+               hid_sensor_power_state(&accel_state->common_attributes, true);
+               report_id = accel_state->accel[chan->scan_index].report_id;
++              min = accel_state->accel[chan->scan_index].logical_minimum;
+               address = accel_3d_addresses[chan->scan_index];
+               if (report_id >= 0)
+                       *val = sensor_hub_input_attr_get_raw_value(
+                                       accel_state->common_attributes.hsdev,
+                                       hsdev->usage, address, report_id,
+-                                      SENSOR_HUB_SYNC);
++                                      SENSOR_HUB_SYNC,
++                                      min < 0);
+               else {
+                       *val = 0;
+                       hid_sensor_power_state(&accel_state->common_attributes,
+diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+index c67ce2ac4715..d9192eb41131 100644
+--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+@@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+@@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
+       case 0:
+               hid_sensor_power_state(&gyro_state->common_attributes, true);
+               report_id = gyro_state->gyro[chan->scan_index].report_id;
++              min = gyro_state->gyro[chan->scan_index].logical_minimum;
+               address = gyro_3d_addresses[chan->scan_index];
+               if (report_id >= 0)
+                       *val = sensor_hub_input_attr_get_raw_value(
+                                       gyro_state->common_attributes.hsdev,
+                                       HID_USAGE_SENSOR_GYRO_3D, address,
+                                       report_id,
+-                                      SENSOR_HUB_SYNC);
++                                      SENSOR_HUB_SYNC,
++                                      min < 0);
+               else {
+                       *val = 0;
+                       hid_sensor_power_state(&gyro_state->common_attributes,
+diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
+index 6e09c1acfe51..e53914d51ec3 100644
+--- a/drivers/iio/humidity/hid-sensor-humidity.c
++++ b/drivers/iio/humidity/hid-sensor-humidity.c
+@@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev,
+                               HID_USAGE_SENSOR_HUMIDITY,
+                               HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
+                               humid_st->humidity_attr.report_id,
+-                              SENSOR_HUB_SYNC);
++                              SENSOR_HUB_SYNC,
++                              humid_st->humidity_attr.logical_minimum < 0);
+               hid_sensor_power_state(&humid_st->common_attributes, false);
+ 
+               return IIO_VAL_INT;
+diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
+index 059d964772c7..95ca86f50434 100644
+--- a/drivers/iio/light/hid-sensor-als.c
++++ b/drivers/iio/light/hid-sensor-als.c
+@@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+@@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
+               case  CHANNEL_SCAN_INDEX_INTENSITY:
+               case  CHANNEL_SCAN_INDEX_ILLUM:
+                       report_id = als_state->als_illum.report_id;
+-                      address =
+-                      HID_USAGE_SENSOR_LIGHT_ILLUM;
++                      min = als_state->als_illum.logical_minimum;
++                      address = HID_USAGE_SENSOR_LIGHT_ILLUM;
+                       break;
+               default:
+                       report_id = -1;
+@@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
+                                       als_state->common_attributes.hsdev,
+                                       HID_USAGE_SENSOR_ALS, address,
+                                       report_id,
+-                                      SENSOR_HUB_SYNC);
++                                      SENSOR_HUB_SYNC,
++                                      min < 0);
+                       hid_sensor_power_state(&als_state->common_attributes,
+                                               false);
+               } else {
+diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
+index 73fced8a63b7..8c017abc4ee2 100644
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+@@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
+               switch (chan->scan_index) {
+               case  CHANNEL_SCAN_INDEX_PRESENCE:
+                       report_id = prox_state->prox_attr.report_id;
+-                      address =
+-                      HID_USAGE_SENSOR_HUMAN_PRESENCE;
++                      min = prox_state->prox_attr.logical_minimum;
++                      address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
+                       break;
+               default:
+                       report_id = -1;
+@@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
+                               prox_state->common_attributes.hsdev,
+                               HID_USAGE_SENSOR_PROX, address,
+                               report_id,
+-                              SENSOR_HUB_SYNC);
++                              SENSOR_HUB_SYNC,
++                              min < 0);
+                       hid_sensor_power_state(&prox_state->common_attributes,
+                                               false);
+               } else {
+diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+index 0e791b02ed4a..b495107bd173 100644
+--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
++++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+@@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+       switch (mask) {
+       case 0:
+               hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
+-              report_id =
+-                      magn_state->magn[chan->address].report_id;
++              report_id = magn_state->magn[chan->address].report_id;
++              min = magn_state->magn[chan->address].logical_minimum;
+               address = magn_3d_addresses[chan->address];
+               if (report_id >= 0)
+                       *val = sensor_hub_input_attr_get_raw_value(
+                               magn_state->magn_flux_attributes.hsdev,
+                               HID_USAGE_SENSOR_COMPASS_3D, address,
+                               report_id,
+-                              SENSOR_HUB_SYNC);
++                              SENSOR_HUB_SYNC,
++                              min < 0);
+               else {
+                       *val = 0;
+                       hid_sensor_power_state(
+diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
+index fd1b3696ee42..16c744bef021 100644
+--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
++++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
+@@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               hid_sensor_power_state(&incl_state->common_attributes, true);
+-              report_id =
+-                      incl_state->incl[chan->scan_index].report_id;
++              report_id = incl_state->incl[chan->scan_index].report_id;
++              min = incl_state->incl[chan->scan_index].logical_minimum;
+               address = incl_3d_addresses[chan->scan_index];
+               if (report_id >= 0)
+                       *val = sensor_hub_input_attr_get_raw_value(
+                               incl_state->common_attributes.hsdev,
+                               HID_USAGE_SENSOR_INCLINOMETER_3D, address,
+                               report_id,
+-                              SENSOR_HUB_SYNC);
++                              SENSOR_HUB_SYNC,
++                              min < 0);
+               else {
+                       hid_sensor_power_state(&incl_state->common_attributes,
+                                               false);
+diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
+index 6848d8c80eff..1c49ef78f888 100644
+--- a/drivers/iio/pressure/hid-sensor-press.c
++++ b/drivers/iio/pressure/hid-sensor-press.c
+@@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev,
+       int report_id = -1;
+       u32 address;
+       int ret_type;
++      s32 min;
+ 
+       *val = 0;
+       *val2 = 0;
+@@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
+               switch (chan->scan_index) {
+               case  CHANNEL_SCAN_INDEX_PRESSURE:
+                       report_id = press_state->press_attr.report_id;
+-                      address =
+-                      HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
++                      min = press_state->press_attr.logical_minimum;
++                      address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+                       break;
+               default:
+                       report_id = -1;
+@@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
+                               press_state->common_attributes.hsdev,
+                               HID_USAGE_SENSOR_PRESSURE, address,
+                               report_id,
+-                              SENSOR_HUB_SYNC);
++                              SENSOR_HUB_SYNC,
++                              min < 0);
+                       hid_sensor_power_state(&press_state->common_attributes,
+                                               false);
+               } else {
+diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
+index c01efeca4002..6ed5cd5742f1 100644
+--- a/drivers/iio/temperature/hid-sensor-temperature.c
++++ b/drivers/iio/temperature/hid-sensor-temperature.c
+@@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev,
+                       HID_USAGE_SENSOR_TEMPERATURE,
+                       HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
+                       temp_st->temperature_attr.report_id,
+-                      SENSOR_HUB_SYNC);
++                      SENSOR_HUB_SYNC,
++                      temp_st->temperature_attr.logical_minimum < 0);
+               hid_sensor_power_state(
+                               &temp_st->common_attributes,
+                               false);
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index f9faacce9250..db33ad985a12 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -12449,7 +12449,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
+       }
+ 
+       /* allocate space for the counter values */
+-      dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
++      dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
++                          GFP_KERNEL);
+       if (!dd->cntrs)
+               goto bail;
+ 
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index 13a7bcaa58e6..ee2859dcceab 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -152,6 +152,8 @@ struct hfi1_ib_stats {
+ extern struct hfi1_ib_stats hfi1_stats;
+ extern const struct pci_error_handlers hfi1_pci_err_handler;
+ 
++extern int num_driver_cntrs;
++
+ /*
+  * First-cut criterion for "device is active" is
+  * two thousand dwords combined Tx, Rx traffic per
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 63d404a6752a..12cf0f7ca7bb 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -1693,7 +1693,7 @@ static const char * const driver_cntr_names[] = {
+ static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
+ static const char **dev_cntr_names;
+ static const char **port_cntr_names;
+-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
++int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+ static int num_dev_cntrs;
+ static int num_port_cntrs;
+ static int cntr_names_initialized;
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 3d701c7a4c91..1ed94b6c0b0a 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -723,6 +723,7 @@ next_mr:
+                       head = frame;
+ 
+                       bcnt -= frame->bcnt;
++                      offset = 0;
+               }
+               break;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index dfc190055167..964c3a0bbf16 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3928,17 +3928,18 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                       goto out;
+               }
+ 
+-              if (wr->opcode == IB_WR_LOCAL_INV ||
+-                  wr->opcode == IB_WR_REG_MR) {
++              if (wr->opcode == IB_WR_REG_MR) {
+                       fence = dev->umr_fence;
+                       next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+-              } else if (wr->send_flags & IB_SEND_FENCE) {
+-                      if (qp->next_fence)
+-                              fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+-                      else
+-                              fence = MLX5_FENCE_MODE_FENCE;
+-              } else {
+-                      fence = qp->next_fence;
++              } else  {
++                      if (wr->send_flags & IB_SEND_FENCE) {
++                              if (qp->next_fence)
++                                      fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
++                              else
++                                      fence = MLX5_FENCE_MODE_FENCE;
++                      } else {
++                              fence = qp->next_fence;
++                      }
+               }
+ 
+               switch (ibqp->qp_type) {
+diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
+index ba3639a0d77c..48ea5b8207f0 100644
+--- a/drivers/infiniband/sw/rdmavt/ah.c
++++ b/drivers/infiniband/sw/rdmavt/ah.c
+@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
+  * rvt_create_ah - create an address handle
+  * @pd: the protection domain
+  * @ah_attr: the attributes of the AH
++ * @udata: pointer to user's input output buffer information.
+  *
+  * This may be called from interrupt context.
+  *
+  * Return: newly allocated ah
+  */
+ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+-                          struct rdma_ah_attr *ah_attr)
++                          struct rdma_ah_attr *ah_attr,
++                          struct ib_udata *udata)
+ {
+       struct rvt_ah *ah;
+       struct rvt_dev_info *dev = ib_to_rvt(pd->device);
+diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
+index 16105af99189..25271b48a683 100644
+--- a/drivers/infiniband/sw/rdmavt/ah.h
++++ b/drivers/infiniband/sw/rdmavt/ah.h
+@@ -51,7 +51,8 @@
+ #include <rdma/rdma_vt.h>
+ 
+ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+-                          struct rdma_ah_attr *ah_attr);
++                          struct rdma_ah_attr *ah_attr,
++                          struct ib_udata *udata);
+ int rvt_destroy_ah(struct ib_ah *ibah);
+ int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+ int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
+index fb770b0182d3..d89ec4724efd 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
+@@ -1376,6 +1376,9 @@ static void nic_remove(struct pci_dev *pdev)
+ {
+       struct nicpf *nic = pci_get_drvdata(pdev);
+ 
++      if (!nic)
++              return;
++
+       if (nic->flags & NIC_SRIOV_ENABLED)
+               pci_disable_sriov(pdev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index 0cec06bec63e..c27054b8ce81 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
+       }
+ 
+       ret = register_netdev(ndev);
+-      if (ret) {
+-              free_netdev(ndev);
++      if (ret)
+               goto alloc_fail;
+-      }
+ 
+       return 0;
+ 
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+index 07d48f2e3369..6766081f5ab9 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -862,6 +862,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+               nvm_word = E1000_INVM_DEFAULT_AL;
+       tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+      igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
++      phy_word = E1000_PHY_PLL_UNCONF;
+       for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+               /* check current state directly from internal PHY */
+               igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index cf6a245db6d5..a37c951b0753 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -2257,7 +2257,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+               *autoneg = false;
+ 
+               if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+-                  hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
++                  hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
++                  hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
++                  hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+                       *speed = IXGBE_LINK_SPEED_1GB_FULL;
+                       return 0;
+               }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index 5fe56dc4cfae..5363cee88a0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1070,8 +1070,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
+ 
+       tx_pause = !!(pause->tx_pause);
+       rx_pause = !!(pause->rx_pause);
+-      rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+-      tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
++      rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
++      tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
+ 
+       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index faa4bd21f148..0fb85d71c11b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -3505,8 +3505,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+               dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+       }
+ 
+-      /* MTU range: 46 - hw-specific max */
+-      dev->min_mtu = MLX4_EN_MIN_MTU;
++      /* MTU range: 68 - hw-specific max */
++      dev->min_mtu = ETH_MIN_MTU;
+       dev->max_mtu = priv->max_mtu;
+ 
+       mdev->pndev[port] = dev;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 09f4764a3f39..bdd87438a354 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -157,7 +157,6 @@
+ #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
+ #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+ 
+-#define MLX4_EN_MIN_MTU               46
+ /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
+  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+  */
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index e7ab23e87de2..d1e88712a275 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
+       struct cp_private *cp;
+       int handled = 0;
+       u16 status;
++      u16 mask;
+ 
+       if (unlikely(dev == NULL))
+               return IRQ_NONE;
+@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
+ 
+       spin_lock(&cp->lock);
+ 
++      mask = cpr16(IntrMask);
++      if (!mask)
++              goto out_unlock;
++
+       status = cpr16(IntrStatus);
+       if (!status || (status == 0xFFFF))
+               goto out_unlock;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index fe76e2c4022a..5b56a86e88ff 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1703,20 +1703,17 @@ EXPORT_SYMBOL(genphy_loopback);
+ 
+ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+ {
+-      phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+-                             PHY_10BT_FEATURES);
+-
+       switch (max_speed) {
+-      default:
+-              return -ENOTSUPP;
+-      case SPEED_1000:
+-              phydev->supported |= PHY_1000BT_FEATURES;
++      case SPEED_10:
++              phydev->supported &= ~PHY_100BT_FEATURES;
+               /* fall through */
+       case SPEED_100:
+-              phydev->supported |= PHY_100BT_FEATURES;
+-              /* fall through */
+-      case SPEED_10:
+-              phydev->supported |= PHY_10BT_FEATURES;
++              phydev->supported &= ~PHY_1000BT_FEATURES;
++              break;
++      case SPEED_1000:
++              break;
++      default:
++              return -ENOTSUPP;
+       }
+ 
+       return 0;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 0a008d136aae..2956bb6cda72 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1818,9 +1818,9 @@ static void tun_setup(struct net_device *dev)
+ static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
+                       struct netlink_ext_ack *extack)
+ {
+-      if (!data)
+-              return 0;
+-      return -EINVAL;
++      NL_SET_ERR_MSG(extack,
++                     "tun/tap creation via rtnetlink is not supported.");
++      return -EOPNOTSUPP;
+ }
+ 
+ static struct rtnl_link_ops tun_link_ops __read_mostly = {
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 0e8e3be50332..215696f21d67 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -309,7 +309,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
+ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+                                  struct receive_queue *rq,
+                                  struct page *page, unsigned int offset,
+-                                 unsigned int len, unsigned int truesize)
++                                 unsigned int len, unsigned int truesize,
++                                 bool hdr_valid)
+ {
+       struct sk_buff *skb;
+       struct virtio_net_hdr_mrg_rxbuf *hdr;
+@@ -331,7 +332,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+       else
+               hdr_padded_len = sizeof(struct padded_vnet_hdr);
+ 
+-      memcpy(hdr, p, hdr_len);
++      if (hdr_valid)
++              memcpy(hdr, p, hdr_len);
+ 
+       len -= hdr_len;
+       offset += hdr_padded_len;
+@@ -594,7 +596,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
+                                  unsigned int len)
+ {
+       struct page *page = buf;
+-      struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
++      struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
++                                        PAGE_SIZE, true);
+ 
+       if (unlikely(!skb))
+               goto err;
+@@ -678,7 +681,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+                               rcu_read_unlock();
+                               put_page(page);
+                               head_skb = page_to_skb(vi, rq, xdp_page,
+-                                                     offset, len, PAGE_SIZE);
++                                                     offset, len,
++                                                     PAGE_SIZE, false);
+                               ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+                               return head_skb;
+                       }
+@@ -712,7 +716,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+               goto err_skb;
+       }
+ 
+-      head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
++      head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
+       curr_skb = head_skb;
+ 
+       if (unlikely(!curr_skb))
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 3a63d58d2ca9..65f3f1a34b6b 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2572,6 +2572,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
+ {
+       struct nvme_ns *ns, *next;
+ 
++      /* prevent racing with ns scanning */
++      flush_work(&ctrl->scan_work);
++
+       /*
+        * The dead states indicates the controller was not gracefully
+        * disconnected. In that case, we won't be able to flush any data while
+@@ -2743,7 +2746,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
+ {
+       nvme_stop_keep_alive(ctrl);
+       flush_work(&ctrl->async_event_work);
+-      flush_work(&ctrl->scan_work);
+       cancel_work_sync(&ctrl->fw_act_work);
+ }
+ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
+diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
+index b73483534a5b..1f1069b70e45 100644
+--- a/drivers/pci/dwc/pci-imx6.c
++++ b/drivers/pci/dwc/pci-imx6.c
+@@ -83,8 +83,6 @@ struct imx6_pcie {
+ #define PCIE_PL_PFLR_FORCE_LINK                       (1 << 15)
+ #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+ #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING       (1 << 29)
+-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP                (1 << 4)
+ 
+ #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+ #define PCIE_PHY_CTRL_DATA_LOC 0
+@@ -653,12 +651,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
+       return 0;
+ }
+ 
+-static int imx6_pcie_link_up(struct dw_pcie *pci)
+-{
+-      return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
+-                      PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
+-}
+-
+ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+       .host_init = imx6_pcie_host_init,
+ };
+@@ -701,7 +693,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+ }
+ 
+ static const struct dw_pcie_ops dw_pcie_ops = {
+-      .link_up = imx6_pcie_link_up,
++      /* No special ops needed, but pcie-designware still expects this struct */
+ };
+ 
+ static int imx6_pcie_probe(struct platform_device *pdev)
+diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
+index 2751dba850c6..3e1abb455472 100644
+--- a/drivers/rtc/rtc-hid-sensor-time.c
++++ b/drivers/rtc/rtc-hid-sensor-time.c
+@@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
+       /* get a report with all values through requesting one value */
+       sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
+                       HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
+-                      time_state->info[0].report_id, SENSOR_HUB_SYNC);
++                      time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
+       /* wait for all values (event) */
+       ret = wait_for_completion_killable_timeout(
+                       &time_state->comp_last_time, HZ*6);
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index d5e5f830f2a1..1b61da61690b 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -2383,7 +2383,7 @@ static int rtw_wx_read32(struct net_device *dev,
+ exit:
+       kfree(ptmp);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ static int rtw_wx_write32(struct net_device *dev,
+diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
+index ca85476e3ff7..23256bbdba51 100644
+--- a/drivers/staging/speakup/kobjects.c
++++ b/drivers/staging/speakup/kobjects.c
+@@ -387,7 +387,7 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
+       len = strlen(buf);
+       if (len < 2 || len > 9)
+               return -EINVAL;
+-      strncpy(new_synth_name, buf, len);
++      memcpy(new_synth_name, buf, len);
+       if (new_synth_name[len - 1] == '\n')
+               len--;
+       new_synth_name[len] = '\0';
+@@ -518,7 +518,7 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
+               return -EINVAL;
+       }
+ 
+-      strncpy(punc_buf, buf, x);
++      memcpy(punc_buf, buf, x);
+ 
+       while (x && punc_buf[x - 1] == '\n')
+               x--;
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index bdbc3fdc7c4f..3a0e4f5d7b83 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -405,12 +405,12 @@ done:
+ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+ {
+       struct usb_request      *req;
+-      struct usb_request      *tmp;
+       unsigned long           flags;
+ 
+       /* fill unused rxq slots with some skb */
+       spin_lock_irqsave(&dev->req_lock, flags);
+-      list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
++      while (!list_empty(&dev->rx_reqs)) {
++              req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
+               list_del_init(&req->list);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+ 
+@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
+ {
+       struct eth_dev          *dev = link->ioport;
+       struct usb_request      *req;
+-      struct usb_request      *tmp;
+ 
+       WARN_ON(!dev);
+       if (!dev)
+@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
+        */
+       usb_ep_disable(link->in_ep);
+       spin_lock(&dev->req_lock);
+-      list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
++      while (!list_empty(&dev->tx_reqs)) {
++              req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
+               list_del(&req->list);
+ 
+               spin_unlock(&dev->req_lock);
+@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
+ 
+       usb_ep_disable(link->out_ep);
+       spin_lock(&dev->req_lock);
+-      list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
++      while (!list_empty(&dev->rx_reqs)) {
++              req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
+               list_del(&req->list);
+ 
+               spin_unlock(&dev->req_lock);
+diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
+index f05ba6825bfe..ee0b87a0773c 100644
+--- a/drivers/usb/gadget/udc/omap_udc.c
++++ b/drivers/usb/gadget/udc/omap_udc.c
+@@ -2037,6 +2037,7 @@ static inline int machine_without_vbus_sense(void)
+ {
+       return machine_is_omap_innovator()
+               || machine_is_omap_osk()
++              || machine_is_omap_palmte()
+               || machine_is_sx1()
+               /* No known omap7xx boards with vbus sense */
+               || cpu_is_omap7xx();
+@@ -2045,7 +2046,7 @@ static inline int machine_without_vbus_sense(void)
+ static int omap_udc_start(struct usb_gadget *g,
+               struct usb_gadget_driver *driver)
+ {
+-      int             status = -ENODEV;
++      int             status;
+       struct omap_ep  *ep;
+       unsigned long   flags;
+ 
+@@ -2083,6 +2084,7 @@ static int omap_udc_start(struct usb_gadget *g,
+                       goto done;
+               }
+       } else {
++              status = 0;
+               if (can_pullup(udc))
+                       pullup_enable(udc);
+               else
+@@ -2612,9 +2614,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
+ 
+ static void omap_udc_release(struct device *dev)
+ {
+-      complete(udc->done);
++      pullup_disable(udc);
++      if (!IS_ERR_OR_NULL(udc->transceiver)) {
++              usb_put_phy(udc->transceiver);
++              udc->transceiver = NULL;
++      }
++      omap_writew(0, UDC_SYSCON1);
++      remove_proc_file();
++      if (udc->dc_clk) {
++              if (udc->clk_requested)
++                      omap_udc_enable_clock(0);
++              clk_put(udc->hhc_clk);
++              clk_put(udc->dc_clk);
++      }
++      if (udc->done)
++              complete(udc->done);
+       kfree(udc);
+-      udc = NULL;
+ }
+ 
+ static int
+@@ -2646,6 +2661,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
+       udc->gadget.speed = USB_SPEED_UNKNOWN;
+       udc->gadget.max_speed = USB_SPEED_FULL;
+       udc->gadget.name = driver_name;
++      udc->gadget.quirk_ep_out_aligned_size = 1;
+       udc->transceiver = xceiv;
+ 
+       /* ep0 is special; put it right after the SETUP buffer */
+@@ -2886,8 +2902,8 @@ bad_on_1710:
+               udc->clr_halt = UDC_RESET_EP;
+ 
+       /* USB general purpose IRQ:  ep0, state changes, dma, etc */
+-      status = request_irq(pdev->resource[1].start, omap_udc_irq,
+-                      0, driver_name, udc);
++      status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
++                                omap_udc_irq, 0, driver_name, udc);
+       if (status != 0) {
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[1].start, status);
+@@ -2895,20 +2911,20 @@ bad_on_1710:
+       }
+ 
+       /* USB "non-iso" IRQ (PIO for all but ep0) */
+-      status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
+-                      0, "omap_udc pio", udc);
++      status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
++                                omap_udc_pio_irq, 0, "omap_udc pio", udc);
+       if (status != 0) {
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[2].start, status);
+-              goto cleanup2;
++              goto cleanup1;
+       }
+ #ifdef        USE_ISO
+-      status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
+-                      0, "omap_udc iso", udc);
++      status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
++                                omap_udc_iso_irq, 0, "omap_udc iso", udc);
+       if (status != 0) {
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[3].start, status);
+-              goto cleanup3;
++              goto cleanup1;
+       }
+ #endif
+       if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+@@ -2919,23 +2935,8 @@ bad_on_1710:
+       }
+ 
+       create_proc_file();
+-      status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+-                      omap_udc_release);
+-      if (status)
+-              goto cleanup4;
+-
+-      return 0;
+-
+-cleanup4:
+-      remove_proc_file();
+-
+-#ifdef        USE_ISO
+-cleanup3:
+-      free_irq(pdev->resource[2].start, udc);
+-#endif
+-
+-cleanup2:
+-      free_irq(pdev->resource[1].start, udc);
++      return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
++                                        omap_udc_release);
+ 
+ cleanup1:
+       kfree(udc);
+@@ -2962,42 +2963,15 @@ static int omap_udc_remove(struct platform_device *pdev)
+ {
+       DECLARE_COMPLETION_ONSTACK(done);
+ 
+-      if (!udc)
+-              return -ENODEV;
+-
+-      usb_del_gadget_udc(&udc->gadget);
+-      if (udc->driver)
+-              return -EBUSY;
+-
+       udc->done = &done;
+ 
+-      pullup_disable(udc);
+-      if (!IS_ERR_OR_NULL(udc->transceiver)) {
+-              usb_put_phy(udc->transceiver);
+-              udc->transceiver = NULL;
+-      }
+-      omap_writew(0, UDC_SYSCON1);
+-
+-      remove_proc_file();
+-
+-#ifdef        USE_ISO
+-      free_irq(pdev->resource[3].start, udc);
+-#endif
+-      free_irq(pdev->resource[2].start, udc);
+-      free_irq(pdev->resource[1].start, udc);
++      usb_del_gadget_udc(&udc->gadget);
+ 
+-      if (udc->dc_clk) {
+-              if (udc->clk_requested)
+-                      omap_udc_enable_clock(0);
+-              clk_put(udc->hhc_clk);
+-              clk_put(udc->dc_clk);
+-      }
++      wait_for_completion(&done);
+ 
+       release_mem_region(pdev->resource[0].start,
+                       pdev->resource[0].end - pdev->resource[0].start + 1);
+ 
+-      wait_for_completion(&done);
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 065f0b607373..f77e499afddd 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -257,25 +257,10 @@ static void release_memory_resource(struct resource *resource)
+       kfree(resource);
+ }
+ 
+-/*
+- * Host memory not allocated to dom0. We can use this range for hotplug-based
+- * ballooning.
+- *
+- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
+- * management algorithms (arch_remove_reservations()) look into guest e820,
+- * which we don't want.
+- */
+-static struct resource hostmem_resource = {
+-      .name   = "Host RAM",
+-};
+-
+-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+-{}
+-
+ static struct resource *additional_memory_resource(phys_addr_t size)
+ {
+-      struct resource *res, *res_hostmem;
+-      int ret = -ENOMEM;
++      struct resource *res;
++      int ret;
+ 
+       res = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (!res)
+@@ -284,42 +269,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+       res->name = "System RAM";
+       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ 
+-      res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+-      if (res_hostmem) {
+-              /* Try to grab a range from hostmem */
+-              res_hostmem->name = "Host memory";
+-              ret = allocate_resource(&hostmem_resource, res_hostmem,
+-                                      size, 0, -1,
+-                                      PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+-      }
+-
+-      if (!ret) {
+-              /*
+-               * Insert this resource into iomem. Because hostmem_resource
+-               * tracks portion of guest e820 marked as UNUSABLE noone else
+-               * should try to use it.
+-               */
+-              res->start = res_hostmem->start;
+-              res->end = res_hostmem->end;
+-              ret = insert_resource(&iomem_resource, res);
+-              if (ret < 0) {
+-                      pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+-                              res->start, res->end);
+-                      release_memory_resource(res_hostmem);
+-                      res_hostmem = NULL;
+-                      res->start = res->end = 0;
+-              }
+-      }
+-
+-      if (ret) {
+-              ret = allocate_resource(&iomem_resource, res,
+-                                      size, 0, -1,
+-                                      PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+-              if (ret < 0) {
+-                      pr_err("Cannot allocate new System RAM resource\n");
+-                      kfree(res);
+-                      return NULL;
+-              }
++      ret = allocate_resource(&iomem_resource, res,
++                              size, 0, -1,
++                              PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
++      if (ret < 0) {
++              pr_err("Cannot allocate new System RAM resource\n");
++              kfree(res);
++              return NULL;
+       }
+ 
+ #ifdef CONFIG_SPARSEMEM
+@@ -331,7 +287,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+                       pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+                              pfn, limit);
+                       release_memory_resource(res);
+-                      release_memory_resource(res_hostmem);
+                       return NULL;
+               }
+       }
+@@ -810,8 +765,6 @@ static int __init balloon_init(void)
+       set_online_page_callback(&xen_online_page);
+       register_memory_notifier(&xen_memory_nb);
+       register_sysctl_table(xen_root);
+-
+-      arch_xen_balloon_init(&hostmem_resource);
+ #endif
+ 
+ #ifdef CONFIG_XEN_PV
+diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
+index 23f1387b3ef7..e7df65d32c91 100644
+--- a/drivers/xen/xlate_mmu.c
++++ b/drivers/xen/xlate_mmu.c
+@@ -36,6 +36,7 @@
+ #include <asm/xen/hypervisor.h>
+ 
+ #include <xen/xen.h>
++#include <xen/xen-ops.h>
+ #include <xen/page.h>
+ #include <xen/interface/xen.h>
+ #include <xen/interface/memory.h>
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index baf5a4cd7ffc..3f22af96d63b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -3354,7 +3354,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
+       kfree(m);
+ }
+ 
+-static void tail_append_pending_moves(struct pending_dir_move *moves,
++static void tail_append_pending_moves(struct send_ctx *sctx,
++                                    struct pending_dir_move *moves,
+                                     struct list_head *stack)
+ {
+       if (list_empty(&moves->list)) {
+@@ -3365,6 +3366,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
+               list_add_tail(&moves->list, stack);
+               list_splice_tail(&list, stack);
+       }
++      if (!RB_EMPTY_NODE(&moves->node)) {
++              rb_erase(&moves->node, &sctx->pending_dir_moves);
++              RB_CLEAR_NODE(&moves->node);
++      }
+ }
+ 
+ static int apply_children_dir_moves(struct send_ctx *sctx)
+@@ -3379,7 +3384,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
+               return 0;
+ 
+       INIT_LIST_HEAD(&stack);
+-      tail_append_pending_moves(pm, &stack);
++      tail_append_pending_moves(sctx, pm, &stack);
+ 
+       while (!list_empty(&stack)) {
+               pm = list_first_entry(&stack, struct pending_dir_move, list);
+@@ -3390,7 +3395,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
+                       goto out;
+               pm = get_pending_dir_moves(sctx, parent_ino);
+               if (pm)
+-                      tail_append_pending_moves(pm, &stack);
++                      tail_append_pending_moves(sctx, pm, &stack);
+       }
+       return 0;
+ 
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 199eb396a1bb..5e9176ec0d3a 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -537,7 +537,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
+                                           netpage->index, cachefiles_gfp);
+               if (ret < 0) {
+                       if (ret == -EEXIST) {
++                              put_page(backpage);
++                              backpage = NULL;
+                               put_page(netpage);
++                              netpage = NULL;
+                               fscache_retrieval_complete(op, 1);
+                               continue;
+                       }
+@@ -610,7 +613,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
+                                           netpage->index, cachefiles_gfp);
+               if (ret < 0) {
+                       if (ret == -EEXIST) {
++                              put_page(backpage);
++                              backpage = NULL;
+                               put_page(netpage);
++                              netpage = NULL;
+                               fscache_retrieval_complete(op, 1);
+                               continue;
+                       }
+@@ -963,11 +969,8 @@ error:
+ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
+ {
+       struct cachefiles_object *object;
+-      struct cachefiles_cache *cache;
+ 
+       object = container_of(_object, struct cachefiles_object, fscache);
+-      cache = container_of(object->fscache.cache,
+-                           struct cachefiles_cache, cache);
+ 
+       _enter("%p,{%lu}", object, page->index);
+ 
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index 329a5d103846..c22cc9d2a5c9 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
+               struct dentry *parent = dget_parent(dentry);
+ 
+               dput(dentry);
+-              if (IS_ROOT(dentry)) {
++              if (dentry == parent) {
+                       dput(parent);
+                       return false;
+               }
+diff --git a/fs/fscache/object.c b/fs/fscache/object.c
+index 7a182c87f378..ab1d7f35f6c2 100644
+--- a/fs/fscache/object.c
++++ b/fs/fscache/object.c
+@@ -715,6 +715,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
+ 
+       if (awaken)
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
++      if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
++              wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
++
+ 
+       /* Prevent a race with our last child, which has to signal EV_CLEARED
+        * before dropping our spinlock.
+diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
+index 374b5688e29e..9bdff5e40626 100644
+--- a/fs/hfs/btree.c
++++ b/fs/hfs/btree.c
+@@ -329,13 +329,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
+ 
+               nidx -= len * 8;
+               i = node->next;
+-              hfs_bnode_put(node);
+               if (!i) {
+                       /* panic */;
+                       pr_crit("unable to free bnode %u. bmap not found!\n",
+                               node->this);
++                      hfs_bnode_put(node);
+                       return;
+               }
++              hfs_bnode_put(node);
+               node = hfs_bnode_find(tree, i);
+               if (IS_ERR(node))
+                       return;
+diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
+index de14b2b6881b..3de3bc4918b5 100644
+--- a/fs/hfsplus/btree.c
++++ b/fs/hfsplus/btree.c
+@@ -454,14 +454,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
+ 
+               nidx -= len * 8;
+               i = node->next;
+-              hfs_bnode_put(node);
+               if (!i) {
+                       /* panic */;
+                       pr_crit("unable to free bnode %u. "
+                                       "bmap not found!\n",
+                               node->this);
++                      hfs_bnode_put(node);
+                       return;
+               }
++              hfs_bnode_put(node);
+               node = hfs_bnode_find(tree, i);
+               if (IS_ERR(node))
+                       return;
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 13612a848378..8dbde5ded042 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1725,7 +1725,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+       if (fh)
+               hdr->args.fh = fh;
+ 
+-      if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
++      if (vers == 4 &&
++              !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+               goto out_failed;
+ 
+       /*
+@@ -1790,7 +1791,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+       if (fh)
+               hdr->args.fh = fh;
+ 
+-      if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
++      if (vers == 4 &&
++              !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+               goto out_failed;
+ 
+       /*
+diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
+index 9f88188060db..4bf8d5854b27 100644
+--- a/fs/ocfs2/export.c
++++ b/fs/ocfs2/export.c
+@@ -125,10 +125,10 @@ check_err:
+ 
+ check_gen:
+       if (handle->ih_generation != inode->i_generation) {
+-              iput(inode);
+               trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
+                                                 handle->ih_generation,
+                                                 inode->i_generation);
++              iput(inode);
+               result = ERR_PTR(-ESTALE);
+               goto bail;
+       }
+diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
+index 7eb3b0a6347e..f55f82ca3425 100644
+--- a/fs/ocfs2/move_extents.c
++++ b/fs/ocfs2/move_extents.c
+@@ -156,18 +156,14 @@ out:
+ }
+ 
+ /*
+- * lock allocators, and reserving appropriate number of bits for
+- * meta blocks and data clusters.
+- *
+- * in some cases, we don't need to reserve clusters, just let data_ac
+- * be NULL.
++ * lock allocator, and reserve appropriate number of bits for
++ * meta blocks.
+  */
+-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
++static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
+                                       struct ocfs2_extent_tree *et,
+                                       u32 clusters_to_move,
+                                       u32 extents_to_split,
+                                       struct ocfs2_alloc_context **meta_ac,
+-                                      struct ocfs2_alloc_context **data_ac,
+                                       int extra_blocks,
+                                       int *credits)
+ {
+@@ -192,13 +188,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+               goto out;
+       }
+ 
+-      if (data_ac) {
+-              ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
+-              if (ret) {
+-                      mlog_errno(ret);
+-                      goto out;
+-              }
+-      }
+ 
+       *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
+ 
+@@ -257,10 +246,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
+               }
+       }
+ 
+-      ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
+-                                               &context->meta_ac,
+-                                               &context->data_ac,
+-                                               extra_blocks, &credits);
++      ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
++                                              *len, 1,
++                                              &context->meta_ac,
++                                              extra_blocks, &credits);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+@@ -283,6 +272,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
+               }
+       }
+ 
++      /*
++       * Make sure ocfs2_reserve_cluster is called after
++       * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
++       *
++       * If ocfs2_reserve_cluster is called
++       * before __ocfs2_flush_truncate_log, dead lock on global bitmap
++       * may happen.
++       *
++       */
++      ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
++      if (ret) {
++              mlog_errno(ret);
++              goto out_unlock_mutex;
++      }
++
+       handle = ocfs2_start_trans(osb, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+@@ -600,9 +604,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
+               }
+       }
+ 
+-      ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
+-                                               &context->meta_ac,
+-                                               NULL, extra_blocks, &credits);
++      ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
++                                              len, 1,
++                                              &context->meta_ac,
++                                              extra_blocks, &credits);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 7125b398d312..9f7e546d7050 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -804,17 +804,14 @@ static int ramoops_probe(struct platform_device *pdev)
+ 
+       cxt->pstore.data = cxt;
+       /*
+-       * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
+-       * have to handle dumps, we must have at least record_size buffer. And
+-       * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
+-       * ZERO_SIZE_PTR).
++       * Since bufsize is only used for dmesg crash dumps, it
++       * must match the size of the dprz record (after PRZ header
++       * and ECC bytes have been accounted for).
+        */
+-      if (cxt->console_size)
+-              cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
+-      cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
+-      cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
++      cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
++      cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+       if (!cxt->pstore.buf) {
+-              pr_err("cannot allocate pstore buffer\n");
++              pr_err("cannot allocate pstore crash dump buffer\n");
+               err = -ENOMEM;
+               goto fail_clear;
+       }
+diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
+index 3c47b7d5d4cf..9e0874d1524c 100644
+--- a/fs/sysv/inode.c
++++ b/fs/sysv/inode.c
+@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
+                 }
+         }
+       brelse(bh);
+-      return 0;
++      return err;
+ }
+ 
+ int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
+diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
+index fc7aae64dcde..000de6da3b1b 100644
+--- a/include/linux/hid-sensor-hub.h
++++ b/include/linux/hid-sensor-hub.h
+@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
+ * @attr_usage_id:     Attribute usage id as per spec
+ * @report_id: Report id to look for
+ * @flag:      Synchronous or asynchronous read
++* @is_signed:   If true then fields < 32 bits will be sign-extended
+ *
+ * Issues a synchronous or asynchronous read request for an input attribute.
+ * Returns data upto 32 bits.
+@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
+ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+                                       u32 usage_id,
+                                       u32 attr_usage_id, u32 report_id,
+-                                      enum sensor_hub_read_flags flag
++                                      enum sensor_hub_read_flags flag,
++                                      bool is_signed
+ );
+ 
+ /**
+diff --git a/include/linux/pstore.h b/include/linux/pstore.h
+index 61f806a7fe29..170bb981d2fd 100644
+--- a/include/linux/pstore.h
++++ b/include/linux/pstore.h
+@@ -90,7 +90,10 @@ struct pstore_record {
+  *
+  * @buf_lock: spinlock to serialize access to @buf
+  * @buf:      preallocated crash dump buffer
+- * @bufsize:  size of @buf available for crash dump writes
++ * @bufsize:  size of @buf available for crash dump bytes (must match
++ *            smallest number of bytes available for writing to a
++ *            backend entry, since compressed bytes don't take kindly
++ *            to being truncated)
+  *
+  * @read_mutex:       serializes @open, @read, @close, and @erase callbacks
+  * @flags:    bitfield of frontends the backend can accept writes for
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index a964366a7ef5..393099b1901a 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -452,6 +452,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
+ 
+ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+ {
++      unsigned int hh_alen = 0;
+       unsigned int seq;
+       unsigned int hh_len;
+ 
+@@ -459,16 +460,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
+               seq = read_seqbegin(&hh->hh_lock);
+               hh_len = hh->hh_len;
+               if (likely(hh_len <= HH_DATA_MOD)) {
+-                      /* this is inlined by gcc */
+-                      memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
++                      hh_alen = HH_DATA_MOD;
++
++                      /* skb_push() would proceed silently if we have room for
++                       * the unaligned size but not for the aligned size:
++                       * check headroom explicitly.
++                       */
++                      if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
++                              /* this is inlined by gcc */
++                              memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
++                                     HH_DATA_MOD);
++                      }
+               } else {
+-                      unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
++                      hh_alen = HH_DATA_ALIGN(hh_len);
+ 
+-                      memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
++                      if (likely(skb_headroom(skb) >= hh_alen)) {
++                              memcpy(skb->data - hh_alen, hh->hh_data,
++                                     hh_alen);
++                      }
+               }
+       } while (read_seqretry(&hh->hh_lock, seq));
+ 
+-      skb_push(skb, hh_len);
++      if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
++              kfree_skb(skb);
++              return NET_XMIT_DROP;
++      }
++
++      __skb_push(skb, hh_len);
+       return dev_queue_xmit(skb);
+ }
+ 
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 8e1e1dc490fd..94c775773f58 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1902,6 +1902,8 @@ struct sctp_association {
+ 
+       __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+       __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
++
++      struct rcu_head rcu;
+ };
+ 
+ 
+diff --git a/include/xen/balloon.h b/include/xen/balloon.h
+index 61f410fd74e4..4914b93a23f2 100644
+--- a/include/xen/balloon.h
++++ b/include/xen/balloon.h
+@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
+ {
+ }
+ #endif
+-
+-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+-struct resource;
+-void arch_xen_balloon_init(struct resource *hostmem_resource);
+-#endif
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index a9cf2e15f6a3..2e2c86dd226f 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1548,6 +1548,146 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
+       return do_syslog(type, buf, len, SYSLOG_FROM_READER);
+ }
+ 
++/*
++ * Special console_lock variants that help to reduce the risk of soft-lockups.
++ * They allow to pass console_lock to another printk() call using a busy wait.
++ */
++
++#ifdef CONFIG_LOCKDEP
++static struct lockdep_map console_owner_dep_map = {
++      .name = "console_owner"
++};
++#endif
++
++static DEFINE_RAW_SPINLOCK(console_owner_lock);
++static struct task_struct *console_owner;
++static bool console_waiter;
++
++/**
++ * console_lock_spinning_enable - mark beginning of code where another
++ *    thread might safely busy wait
++ *
++ * This basically converts console_lock into a spinlock. This marks
++ * the section where the console_lock owner can not sleep, because
++ * there may be a waiter spinning (like a spinlock). Also it must be
++ * ready to hand over the lock at the end of the section.
++ */
++static void console_lock_spinning_enable(void)
++{
++      raw_spin_lock(&console_owner_lock);
++      console_owner = current;
++      raw_spin_unlock(&console_owner_lock);
++
++      /* The waiter may spin on us after setting console_owner */
++      spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
++}
++
++/**
++ * console_lock_spinning_disable_and_check - mark end of code where another
++ *    thread was able to busy wait and check if there is a waiter
++ *
++ * This is called at the end of the section where spinning is allowed.
++ * It has two functions. First, it is a signal that it is no longer
++ * safe to start busy waiting for the lock. Second, it checks if
++ * there is a busy waiter and passes the lock rights to her.
++ *
++ * Important: Callers lose the lock if there was a busy waiter.
++ *    They must not touch items synchronized by console_lock
++ *    in this case.
++ *
++ * Return: 1 if the lock rights were passed, 0 otherwise.
++ */
++static int console_lock_spinning_disable_and_check(void)
++{
++      int waiter;
++
++      raw_spin_lock(&console_owner_lock);
++      waiter = READ_ONCE(console_waiter);
++      console_owner = NULL;
++      raw_spin_unlock(&console_owner_lock);
++
++      if (!waiter) {
++              spin_release(&console_owner_dep_map, 1, _THIS_IP_);
++              return 0;
++      }
++
++      /* The waiter is now free to continue */
++      WRITE_ONCE(console_waiter, false);
++
++      spin_release(&console_owner_dep_map, 1, _THIS_IP_);
++
++      /*
++       * Hand off console_lock to waiter. The waiter will perform
++       * the up(). After this, the waiter is the console_lock owner.
++       */
++      mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
++      return 1;
++}
++
++/**
++ * console_trylock_spinning - try to get console_lock by busy waiting
++ *
++ * This allows to busy wait for the console_lock when the current
++ * owner is running in specially marked sections. It means that
++ * the current owner is running and cannot reschedule until it
++ * is ready to lose the lock.
++ *
++ * Return: 1 if we got the lock, 0 othrewise
++ */
++static int console_trylock_spinning(void)
++{
++      struct task_struct *owner = NULL;
++      bool waiter;
++      bool spin = false;
++      unsigned long flags;
++
++      if (console_trylock())
++              return 1;
++
++      printk_safe_enter_irqsave(flags);
++
++      raw_spin_lock(&console_owner_lock);
++      owner = READ_ONCE(console_owner);
++      waiter = READ_ONCE(console_waiter);
++      if (!waiter && owner && owner != current) {
++              WRITE_ONCE(console_waiter, true);
++              spin = true;
++      }
++      raw_spin_unlock(&console_owner_lock);
++
++      /*
++       * If there is an active printk() writing to the
++       * consoles, instead of having it write our data too,
++       * see if we can offload that load from the active
++       * printer, and do some printing ourselves.
++       * Go into a spin only if there isn't already a waiter
++       * spinning, and there is an active printer, and
++       * that active printer isn't us (recursive printk?).
++       */
++      if (!spin) {
++              printk_safe_exit_irqrestore(flags);
++              return 0;
++      }
++
++      /* We spin waiting for the owner to release us */
++      spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
++      /* Owner will clear console_waiter on hand off */
++      while (READ_ONCE(console_waiter))
++              cpu_relax();
++      spin_release(&console_owner_dep_map, 1, _THIS_IP_);
++
++      printk_safe_exit_irqrestore(flags);
++      /*
++       * The owner passed the console lock to us.
++       * Since we did not spin on console lock, annotate
++       * this as a trylock. Otherwise lockdep will
++       * complain.
++       */
++      mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
++
++      return 1;
++}
++
+ /*
+  * Call the console drivers, asking them to write out
+  * log_buf[start] to log_buf[end - 1].
+@@ -1773,7 +1913,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+                * semaphore.  The release will print out buffers and wake up
+                * /dev/kmsg and syslog() users.
+                */
+-              if (console_trylock())
++              if (console_trylock_spinning())
+                       console_unlock();
+               preempt_enable();
+       }
+@@ -1876,6 +2016,8 @@ static ssize_t msg_print_ext_header(char *buf, size_t size,
+ static ssize_t msg_print_ext_body(char *buf, size_t size,
+                                 char *dict, size_t dict_len,
+                                 char *text, size_t text_len) { return 0; }
++static void console_lock_spinning_enable(void) { }
++static int console_lock_spinning_disable_and_check(void) { return 0; }
+ static void call_console_drivers(const char *ext_text, size_t ext_len,
+                                const char *text, size_t len) {}
+ static size_t msg_print_text(const struct printk_log *msg,
+@@ -2237,14 +2379,29 @@ skip:
+               console_seq++;
+               raw_spin_unlock(&logbuf_lock);
+ 
++              /*
++               * While actively printing out messages, if another printk()
++               * were to occur on another CPU, it may wait for this one to
++               * finish. This task can not be preempted if there is a
++               * waiter waiting to take over.
++               */
++              console_lock_spinning_enable();
++
+               stop_critical_timings();        /* don't trace print latency */
+               call_console_drivers(ext_text, ext_len, text, len);
+               start_critical_timings();
++
++              if (console_lock_spinning_disable_and_check()) {
++                      printk_safe_exit_irqrestore(flags);
++                      goto out;
++              }
++
+               printk_safe_exit_irqrestore(flags);
+ 
+               if (do_cond_resched)
+                       cond_resched();
+       }
++
+       console_locked = 0;
+ 
+       /* Release the exclusive_console once it is used */
+@@ -2269,6 +2426,7 @@ skip:
+       if (retry && console_trylock())
+               goto again;
+ 
++out:
+       if (wake_klogd)
+               wake_up_klogd();
+ }
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 6350f64d5aa4..f9dd8fd055a6 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -161,11 +161,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+                       i++;
+               } else if (fmt[i] == 'p' || fmt[i] == 's') {
+                       mod[fmt_cnt]++;
+-                      i++;
+-                      if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
++                      /* disallow any further format extensions */
++                      if (fmt[i + 1] != 0 &&
++                          !isspace(fmt[i + 1]) &&
++                          !ispunct(fmt[i + 1]))
+                               return -EINVAL;
+                       fmt_cnt++;
+-                      if (fmt[i - 1] == 's') {
++                      if (fmt[i] == 's') {
+                               if (str_seen)
+                                       /* allow only one '%s' per fmt string */
+                                       return -EINVAL;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 99308479b1c8..bacb00a9cd9f 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -111,7 +111,6 @@ static void fill_pool(void)
+               if (!new)
+                       return;
+ 
+-              kmemleak_ignore(new);
+               raw_spin_lock_irqsave(&pool_lock, flags);
+               hlist_add_head(&new->node, &obj_pool);
+               debug_objects_allocated++;
+@@ -1085,7 +1084,6 @@ static int __init debug_objects_replace_static_objects(void)
+               obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+               if (!obj)
+                       goto free;
+-              kmemleak_ignore(obj);
+               hlist_add_head(&obj->node, &objects);
+       }
+ 
+@@ -1141,7 +1139,8 @@ void __init debug_objects_mem_init(void)
+ 
+       obj_cache = kmem_cache_create("debug_objects_cache",
+                                     sizeof (struct debug_obj), 0,
+-                                    SLAB_DEBUG_OBJECTS, NULL);
++                                    SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
++                                    NULL);
+ 
+       if (!obj_cache || debug_objects_replace_static_objects()) {
+               debug_objects_enabled = 0;
+diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
+index 0e343fd29570..835242e74aaa 100644
+--- a/lib/interval_tree_test.c
++++ b/lib/interval_tree_test.c
+@@ -11,10 +11,10 @@
+       MODULE_PARM_DESC(name, msg);
+ 
+ __param(int, nnodes, 100, "Number of nodes in the interval tree");
+-__param(int, perf_loops, 100000, "Number of iterations modifying the tree");
++__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
+ 
+ __param(int, nsearches, 100, "Number of searches to the interval tree");
+-__param(int, search_loops, 10000, "Number of iterations searching the tree");
++__param(int, search_loops, 1000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+ 
+ __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
+index 191a238e5a9d..7d36c1e27ff6 100644
+--- a/lib/rbtree_test.c
++++ b/lib/rbtree_test.c
+@@ -11,7 +11,7 @@
+       MODULE_PARM_DESC(name, msg);
+ 
+ __param(int, nnodes, 100, "Number of nodes in the rb-tree");
+-__param(int, perf_loops, 100000, "Number of iterations modifying the rb-tree");
++__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
+__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+ 
+ struct test_node {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6be91a1a00d9..a2f365f40433 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5544,8 +5544,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
+                                       unsigned long size)
+ {
+       struct pglist_data *pgdat = zone->zone_pgdat;
++      int zone_idx = zone_idx(zone) + 1;
+ 
+-      pgdat->nr_zones = zone_idx(zone) + 1;
++      if (zone_idx > pgdat->nr_zones)
++              pgdat->nr_zones = zone_idx;
+ 
+       zone->zone_start_pfn = zone_start_pfn;
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index c392a77ff788..925af6b43017 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3280,6 +3280,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
+ {
+       int err;
+ 
++      if (dev->type != ARPHRD_ETHER)
++              return -EINVAL;
++
+       netif_addr_lock_bh(dev);
+       err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
+       if (err)
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index cb8fa5d7afe1..f686d7761acb 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -513,6 +513,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+       struct rb_node *rbn;
+       int len;
+       int ihlen;
++      int delta;
+       int err;
+       u8 ecn;
+ 
+@@ -554,10 +555,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+       if (len > 65535)
+               goto out_oversize;
+ 
++      delta = - head->truesize;
++
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               goto out_nomem;
+ 
++      delta += head->truesize;
++      if (delta)
++              add_frag_mem_limit(qp->q.net, delta);
++
+       /* If the first fragment is fragmented itself, we split
+        * it to two chunks: the first with data and paged part
+        * and the second, holding only fragments. */
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index b2ead31afcba..24bad638c2ec 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1885,7 +1885,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
+  * This algorithm is from John Heffner.
+  */
+ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+-                               bool *is_cwnd_limited, u32 max_segs)
++                               bool *is_cwnd_limited,
++                               bool *is_rwnd_limited,
++                               u32 max_segs)
+ {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 age, send_win, cong_win, limit, in_flight;
+@@ -1893,9 +1895,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+       struct sk_buff *head;
+       int win_divisor;
+ 
+-      if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+-              goto send_now;
+-
+       if (icsk->icsk_ca_state >= TCP_CA_Recovery)
+               goto send_now;
+ 
+@@ -1951,10 +1950,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+       if (age < (tp->srtt_us >> 4))
+               goto send_now;
+ 
+-      /* Ok, it looks like it is advisable to defer. */
++      /* Ok, it looks like it is advisable to defer.
++       * Three cases are tracked :
++       * 1) We are cwnd-limited
++       * 2) We are rwnd-limited
++       * 3) We are application limited.
++       */
++      if (cong_win < send_win) {
++              if (cong_win <= skb->len) {
++                      *is_cwnd_limited = true;
++                      return true;
++              }
++      } else {
++              if (send_win <= skb->len) {
++                      *is_rwnd_limited = true;
++                      return true;
++              }
++      }
+ 
+-      if (cong_win < send_win && cong_win <= skb->len)
+-              *is_cwnd_limited = true;
++      /* If this packet won't get more data, do not wait. */
++      if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++              goto send_now;
+ 
+       return true;
+ 
+@@ -2328,7 +2344,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+               } else {
+                       if (!push_one &&
+                           tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
+-                                               max_segs))
++                                               &is_rwnd_limited, max_segs))
+                               break;
+               }
+ 
+@@ -2473,14 +2489,18 @@ void tcp_send_loss_probe(struct sock *sk)
+               skb = tcp_write_queue_tail(sk);
+       }
+ 
++      if (unlikely(!skb)) {
++              WARN_ONCE(tp->packets_out,
++                        "invalid inflight: %u state %u cwnd %u mss %d\n",
++                        tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
++              inet_csk(sk)->icsk_pending = 0;
++              return;
++      }
++
+       /* At most one outstanding TLP retransmission. */
+       if (tp->tlp_high_seq)
+               goto rearm_timer;
+ 
+-      /* Retransmit last segment. */
+-      if (WARN_ON(!skb))
+-              goto rearm_timer;
+-
+       if (skb_still_in_host_queue(sk, skb))
+               goto rearm_timer;
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 9ab1e0fcbc13..7ca8264cbdf9 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+       const struct ipv6_pinfo *np = inet6_sk(sk);
+       struct in6_addr *first_hop = &fl6->daddr;
+       struct dst_entry *dst = skb_dst(skb);
++      unsigned int head_room;
+       struct ipv6hdr *hdr;
+       u8  proto = fl6->flowi6_proto;
+       int seg_len = skb->len;
+       int hlimit = -1;
+       u32 mtu;
+ 
+-      if (opt) {
+-              unsigned int head_room;
++      head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
++      if (opt)
++              head_room += opt->opt_nflen + opt->opt_flen;
+ 
+-              /* First: exthdrs may take lots of space (~8K for now)
+-                 MAX_HEADER is not enough.
+-               */
+-              head_room = opt->opt_nflen + opt->opt_flen;
+-              seg_len += head_room;
+-              head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+-
+-              if (skb_headroom(skb) < head_room) {
+-                      struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+-                      if (!skb2) {
+-                              IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                            IPSTATS_MIB_OUTDISCARDS);
+-                              kfree_skb(skb);
+-                              return -ENOBUFS;
+-                      }
+-                      if (skb->sk)
+-                              skb_set_owner_w(skb2, skb->sk);
+-                      consume_skb(skb);
+-                      skb = skb2;
++      if (unlikely(skb_headroom(skb) < head_room)) {
++              struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
++              if (!skb2) {
++                      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
++                                    IPSTATS_MIB_OUTDISCARDS);
++                      kfree_skb(skb);
++                      return -ENOBUFS;
+               }
++              if (skb->sk)
++                      skb_set_owner_w(skb2, skb->sk);
++              consume_skb(skb);
++              skb = skb2;
++      }
++
++      if (opt) {
++              seg_len += opt->opt_nflen + opt->opt_flen;
++
+               if (opt->opt_flen)
+                       ipv6_push_frag_opts(skb, opt, &proto);
++
+               if (opt->opt_nflen)
+                       ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
+                                            &fl6->saddr);
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 9bf260459f83..1f8b1a433b5d 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -25,7 +25,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+       unsigned int hh_len;
+       struct dst_entry *dst;
+       struct flowi6 fl6 = {
+-              .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
++              .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
++                      rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+               .flowi6_mark = skb->mark,
+               .flowi6_uid = sock_net_uid(net, sk),
+               .daddr = iph->daddr,
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 611d406c4656..237fb04c6716 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -349,7 +349,7 @@ static bool
+ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
+ {
+       struct sk_buff *fp, *head = fq->q.fragments;
+-      int    payload_len;
++      int    payload_len, delta;
+       u8 ecn;
+ 
+       inet_frag_kill(&fq->q);
+@@ -371,10 +371,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
+               return false;
+       }
+ 
++      delta = - head->truesize;
++
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               return false;
+ 
++      delta += head->truesize;
++      if (delta)
++              add_frag_mem_limit(fq->q.net, delta);
++
+       /* If the first fragment is fragmented itself, we split
+        * it to two chunks: the first with data and paged part
+        * and the second, holding only fragments. */
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index ede0061b6f5d..2a8c680b67cd 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -348,7 +348,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ {
+       struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
+       struct sk_buff *fp, *head = fq->q.fragments;
+-      int    payload_len;
++      int    payload_len, delta;
+       unsigned int nhoff;
+       int sum_truesize;
+       u8 ecn;
+@@ -389,10 +389,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+       if (payload_len > IPV6_MAXPLEN)
+               goto out_oversize;
+ 
++      delta = - head->truesize;
++
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               goto out_oom;
+ 
++      delta += head->truesize;
++      if (delta)
++              add_frag_mem_limit(fq->q.net, delta);
++
+       /* If the first fragment is fragmented itself, we split
+        * it to two chunks: the first with data and paged part
+        * and the second, holding only fragments. */
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index bf4763fd68c2..cf9342bfe95a 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -327,6 +327,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+               struct ipv6hdr *hdr = ipv6_hdr(skb);
+               struct flowi6 fl6;
+ 
++              memset(&fl6, 0, sizeof(fl6));
+               fl6.daddr = hdr->daddr;
+               fl6.saddr = hdr->saddr;
+               fl6.flowlabel = ip6_flowinfo(hdr);
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 327ebe786eeb..2f45c3ce77ef 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -4012,6 +4012,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
+ 
+ static struct notifier_block ip_vs_dst_notifier = {
+       .notifier_call = ip_vs_dst_event,
++#ifdef CONFIG_IP_VS_IPV6
++      .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
++#endif
+ };
+ 
+ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3ae365f92bff..623ec29ade26 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2252,7 +2252,7 @@ err:
+ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+                                  struct nft_rule *rule)
+ {
+-      struct nft_expr *expr;
++      struct nft_expr *expr, *next;
+ 
+       /*
+        * Careful: some expressions might not be initialized in case this
+@@ -2260,8 +2260,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+        */
+       expr = nft_expr_first(rule);
+       while (expr != nft_expr_last(rule) && expr->ops) {
++              next = nft_expr_next(expr);
+               nf_tables_expr_destroy(ctx, expr);
+-              expr = nft_expr_next(expr);
++              expr = next;
+       }
+       kfree(rule);
+ }
+@@ -2399,21 +2400,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+       }
+ 
+       if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+-              if (!nft_is_active_next(net, old_rule)) {
+-                      err = -ENOENT;
+-                      goto err2;
+-              }
+-              trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+-                                         old_rule);
++              trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
+               if (trans == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+-              nft_deactivate_next(net, old_rule);
+-              chain->use--;
+-
+-              if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+-                      err = -ENOMEM;
++              err = nft_delrule(&ctx, old_rule);
++              if (err < 0) {
++                      nft_trans_destroy(trans);
+                       goto err2;
+               }
+ 
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 6da1cec1494a..7533c2fd6b76 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -497,6 +497,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                   void *info)
+ {
+       struct xt_match *match = expr->ops->data;
++      struct module *me = match->me;
+       struct xt_mtdtor_param par;
+ 
+       par.net = ctx->net;
+@@ -507,7 +508,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               par.match->destroy(&par);
+ 
+       if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
+-              module_put(match->me);
++              module_put(me);
+ }
+ 
+ static void
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 0c034597b9b8..fe8e8a1622b5 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
+ 
+       /* copy match config into hashtable config */
+       ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
+-
+-      if (ret)
++      if (ret) {
++              vfree(hinfo);
+               return ret;
++      }
+ 
+       hinfo->cfg.size = size;
+       if (hinfo->cfg.max == 0)
+@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
+       int ret;
+ 
+       ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
+-
+       if (ret)
+               return ret;
+ 
+@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
+       int ret;
+ 
+       ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
+-
+       if (ret)
+               return ret;
+ 
+@@ -920,7 +919,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
+               return ret;
+ 
+       ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
+-
+       if (ret)
+               return ret;
+ 
+@@ -939,7 +937,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
+               return ret;
+ 
+       ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
+-
+       if (ret)
+               return ret;
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 3d325b840802..3f4f0b946798 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -436,6 +436,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+       int count = 1;
+       int rc = NET_XMIT_SUCCESS;
+ 
++      /* Do not fool qdisc_drop_all() */
++      skb->prev = NULL;
++
+       /* Random duplication */
+       if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+               ++count;
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 4982b31fec8e..23fec3817e0c 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -432,7 +432,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
+ 
+       WARN_ON(atomic_read(&asoc->rmem_alloc));
+ 
+-      kfree(asoc);
++      kfree_rcu(asoc, rcu);
+       SCTP_DBG_OBJCNT_DEC(assoc);
+ }
+ 
+diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
+index 9f5036442ab9..b47954a6b8ab 100644
+--- a/sound/firewire/fireface/ff-protocol-ff400.c
++++ b/sound/firewire/fireface/ff-protocol-ff400.c
+@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
+       int err;
+ 
+       err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+-                               FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
++                               FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
+       if (err < 0)
+               return err;
+       data = le32_to_cpu(reg);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f6136f041a81..31c91e0a815e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6965,6 +6965,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
+       alc_update_coef_idx(codec, 0x4, 0, 1<<11);
+ }
+ 
++static void alc294_hp_init(struct hda_codec *codec)
++{
++      struct alc_spec *spec = codec->spec;
++      hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++      int i, val;
++
++      if (!hp_pin)
++              return;
++
++      snd_hda_codec_write(codec, hp_pin, 0,
++                          AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++
++      msleep(100);
++
++      snd_hda_codec_write(codec, hp_pin, 0,
++                          AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++
++      alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
++      alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
++
++      /* Wait for depop procedure finish  */
++      val = alc_read_coefex_idx(codec, 0x58, 0x01);
++      for (i = 0; i < 20 && val & 0x0080; i++) {
++              msleep(50);
++              val = alc_read_coefex_idx(codec, 0x58, 0x01);
++      }
++      /* Set HP depop to auto mode */
++      alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
++      msleep(50);
++}
++
+ /*
+  */
+ static int patch_alc269(struct hda_codec *codec)
+@@ -7101,6 +7132,7 @@ static int patch_alc269(struct hda_codec *codec)
+               spec->codec_variant = ALC269_TYPE_ALC294;
+               spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
++              alc294_hp_init(codec);
+               break;
+       case 0x10ec0300:
+               spec->codec_variant = ALC269_TYPE_ALC300;
+@@ -7112,6 +7144,7 @@ static int patch_alc269(struct hda_codec *codec)
+               spec->codec_variant = ALC269_TYPE_ALC700;
+               spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
++              alc294_hp_init(codec);
+               break;
+ 
+       }
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 989d093abda7..67330b6ab204 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -787,38 +787,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
+ 
+ static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
+ {
+-      u16 scratch[4];
++      unsigned int scratch[4];
++      unsigned int addr = dsp->base + ADSP2_SCRATCH0;
++      unsigned int i;
+       int ret;
+ 
+-      ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
+-                              scratch, sizeof(scratch));
+-      if (ret) {
+-              adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
+-              return;
++      for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
++              ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
++              if (ret) {
++                      adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
++                      return;
++              }
+       }
+ 
+       adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+-               be16_to_cpu(scratch[0]),
+-               be16_to_cpu(scratch[1]),
+-               be16_to_cpu(scratch[2]),
+-               be16_to_cpu(scratch[3]));
++               scratch[0], scratch[1], scratch[2], scratch[3]);
+ }
+ 
+ static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
+ {
+-      u32 scratch[2];
++      unsigned int scratch[2];
+       int ret;
+ 
+-      ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
+-                            scratch, sizeof(scratch));
+-
++      ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
++                        &scratch[0]);
+       if (ret) {
+-              adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
++              adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
+               return;
+       }
+ 
+-      scratch[0] = be32_to_cpu(scratch[0]);
+-      scratch[1] = be32_to_cpu(scratch[1]);
++      ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
++                        &scratch[1]);
++      if (ret) {
++              adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
++              return;
++      }
+ 
+       adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+                scratch[0] & 0xFFFF,
+diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
+index 614b18d2f631..6fd143799534 100644
+--- a/sound/soc/omap/omap-abe-twl6040.c
++++ b/sound/soc/omap/omap-abe-twl6040.c
+@@ -36,6 +36,8 @@
+ #include "../codecs/twl6040.h"
+ 
+ struct abe_twl6040 {
++      struct snd_soc_card card;
++      struct snd_soc_dai_link dai_links[2];
+       int     jack_detection; /* board can detect jack events */
+       int     mclk_freq;      /* MCLK frequency speed for twl6040 */
+ };
+@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
+                               ARRAY_SIZE(dmic_audio_map));
+ }
+ 
+-/* Digital audio interface glue - connects codec <--> CPU */
+-static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
+-      {
+-              .name = "TWL6040",
+-              .stream_name = "TWL6040",
+-              .codec_dai_name = "twl6040-legacy",
+-              .codec_name = "twl6040-codec",
+-              .init = omap_abe_twl6040_init,
+-              .ops = &omap_abe_ops,
+-      },
+-      {
+-              .name = "DMIC",
+-              .stream_name = "DMIC Capture",
+-              .codec_dai_name = "dmic-hifi",
+-              .codec_name = "dmic-codec",
+-              .init = omap_abe_dmic_init,
+-              .ops = &omap_abe_dmic_ops,
+-      },
+-};
+-
+-/* Audio machine driver */
+-static struct snd_soc_card omap_abe_card = {
+-      .owner = THIS_MODULE,
+-
+-      .dapm_widgets = twl6040_dapm_widgets,
+-      .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
+-      .dapm_routes = audio_map,
+-      .num_dapm_routes = ARRAY_SIZE(audio_map),
+-};
+-
+ static int omap_abe_probe(struct platform_device *pdev)
+ {
+       struct device_node *node = pdev->dev.of_node;
+-      struct snd_soc_card *card = &omap_abe_card;
++      struct snd_soc_card *card;
+       struct device_node *dai_node;
+       struct abe_twl6040 *priv;
+       int num_links = 0;
+@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      card->dev = &pdev->dev;
+-
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
+       if (priv == NULL)
+               return -ENOMEM;
+ 
++      card = &priv->card;
++      card->dev = &pdev->dev;
++      card->owner = THIS_MODULE;
++      card->dapm_widgets = twl6040_dapm_widgets;
++      card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
++      card->dapm_routes = audio_map;
++      card->num_dapm_routes = ARRAY_SIZE(audio_map);
++
+       if (snd_soc_of_parse_card_name(card, "ti,model")) {
+               dev_err(&pdev->dev, "Card name is not provided\n");
+               return -ENODEV;
+@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
+               dev_err(&pdev->dev, "McPDM node is not provided\n");
+               return -EINVAL;
+       }
+-      abe_twl6040_dai_links[0].cpu_of_node = dai_node;
+-      abe_twl6040_dai_links[0].platform_of_node = dai_node;
++
++      priv->dai_links[0].name = "DMIC";
++      priv->dai_links[0].stream_name = "TWL6040";
++      priv->dai_links[0].cpu_of_node = dai_node;
++      priv->dai_links[0].platform_of_node = dai_node;
++      priv->dai_links[0].codec_dai_name = "twl6040-legacy";
++      priv->dai_links[0].codec_name = "twl6040-codec";
++      priv->dai_links[0].init = omap_abe_twl6040_init;
++      priv->dai_links[0].ops = &omap_abe_ops;
+ 
+       dai_node = of_parse_phandle(node, "ti,dmic", 0);
+       if (dai_node) {
+               num_links = 2;
+-              abe_twl6040_dai_links[1].cpu_of_node = dai_node;
+-              abe_twl6040_dai_links[1].platform_of_node = dai_node;
++              priv->dai_links[1].name = "TWL6040";
++              priv->dai_links[1].stream_name = "DMIC Capture";
++              priv->dai_links[1].cpu_of_node = dai_node;
++              priv->dai_links[1].platform_of_node = dai_node;
++              priv->dai_links[1].codec_dai_name = "dmic-hifi";
++              priv->dai_links[1].codec_name = "dmic-codec";
++              priv->dai_links[1].init = omap_abe_dmic_init;
++              priv->dai_links[1].ops = &omap_abe_dmic_ops;
+       } else {
+               num_links = 1;
+       }
+@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      card->dai_link = abe_twl6040_dai_links;
++      card->dai_link = priv->dai_links;
+       card->num_links = num_links;
+ 
+       snd_soc_card_set_drvdata(card, priv);
+diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
+index 09db2aec12a3..776e809a8aab 100644
+--- a/sound/soc/omap/omap-dmic.c
++++ b/sound/soc/omap/omap-dmic.c
+@@ -48,6 +48,8 @@ struct omap_dmic {
+       struct device *dev;
+       void __iomem *io_base;
+       struct clk *fclk;
++      struct pm_qos_request pm_qos_req;
++      int latency;
+       int fclk_freq;
+       int out_freq;
+       int clk_div;
+@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
+ 
+       mutex_lock(&dmic->mutex);
+ 
++      pm_qos_remove_request(&dmic->pm_qos_req);
++
+       if (!dai->active)
+               dmic->active = 0;
+ 
+@@ -226,6 +230,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
+       /* packet size is threshold * channels */
+       dma_data = snd_soc_dai_get_dma_data(dai, substream);
+       dma_data->maxburst = dmic->threshold * channels;
++      dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
++                      params_rate(params);
+ 
+       return 0;
+ }
+@@ -236,6 +242,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
+       struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
+       u32 ctrl;
+ 
++      if (pm_qos_request_active(&dmic->pm_qos_req))
++              pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
++
+       /* Configure uplink threshold */
+       omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
+ 
+diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
+index 6b40bdbef336..47c2ed5ca492 100644
+--- a/sound/soc/omap/omap-mcbsp.c
++++ b/sound/soc/omap/omap-mcbsp.c
+@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
+                       pkt_size = channels;
+               }
+ 
+-              latency = ((((buffer_size - pkt_size) / channels) * 1000)
+-                               / (params->rate_num / params->rate_den));
+-
++              latency = (buffer_size - pkt_size) / channels;
++              latency = latency * USEC_PER_SEC /
++                        (params->rate_num / params->rate_den);
+               mcbsp->latency[substream->stream] = latency;
+ 
+               omap_mcbsp_set_threshold(substream, pkt_size);
+diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
+index 64609c77a79d..44ffeb71cd1d 100644
+--- a/sound/soc/omap/omap-mcpdm.c
++++ b/sound/soc/omap/omap-mcpdm.c
+@@ -54,6 +54,8 @@ struct omap_mcpdm {
+       unsigned long phys_base;
+       void __iomem *io_base;
+       int irq;
++      struct pm_qos_request pm_qos_req;
++      int latency[2];
+ 
+       struct mutex mutex;
+ 
+@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
+                                 struct snd_soc_dai *dai)
+ {
+       struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
++      int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
++      int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
++      int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 
+       mutex_lock(&mcpdm->mutex);
+ 
+@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
+               }
+       }
+ 
++      if (mcpdm->latency[stream2])
++              pm_qos_update_request(&mcpdm->pm_qos_req,
++                                    mcpdm->latency[stream2]);
++      else if (mcpdm->latency[stream1])
++              pm_qos_remove_request(&mcpdm->pm_qos_req);
++
++      mcpdm->latency[stream1] = 0;
++
+       mutex_unlock(&mcpdm->mutex);
+ }
+ 
+@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
+       int stream = substream->stream;
+       struct snd_dmaengine_dai_dma_data *dma_data;
+       u32 threshold;
+-      int channels;
++      int channels, latency;
+       int link_mask = 0;
+ 
+       channels = params_channels(params);
+@@ -340,14 +353,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
+ 
+               dma_data->maxburst =
+                               (MCPDM_DN_THRES_MAX - threshold) * channels;
++              latency = threshold;
+       } else {
+               /* If playback is not running assume a stereo stream to come */
+               if (!mcpdm->config[!stream].link_mask)
+                       mcpdm->config[!stream].link_mask = (0x3 << 3);
+ 
+               dma_data->maxburst = threshold * channels;
++              latency = (MCPDM_DN_THRES_MAX - threshold);
+       }
+ 
++      /*
++       * The DMA must act to a DMA request within latency time (usec) to avoid
++       * under/overflow
++       */
++      mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
++
++      if (!mcpdm->latency[stream])
++              mcpdm->latency[stream] = 10;
++
+       /* Check if we need to restart McPDM with this stream */
+       if (mcpdm->config[stream].link_mask &&
+           mcpdm->config[stream].link_mask != link_mask)
+@@ -362,6 +386,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
+                                 struct snd_soc_dai *dai)
+ {
+       struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
++      struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
++      int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
++      int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
++      int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
++      int latency = mcpdm->latency[stream2];
++
++      /* Prevent omap hardware from hitting off between FIFO fills */
++      if (!latency || mcpdm->latency[stream1] < latency)
++              latency = mcpdm->latency[stream1];
++
++      if (pm_qos_request_active(pm_qos_req))
++              pm_qos_update_request(pm_qos_req, latency);
++      else if (latency)
++              pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
+ 
+       if (!omap_mcpdm_active(mcpdm)) {
+               omap_mcpdm_start(mcpdm);
+@@ -423,6 +461,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
+       free_irq(mcpdm->irq, (void *)mcpdm);
+       pm_runtime_disable(mcpdm->dev);
+ 
++      if (pm_qos_request_active(&mcpdm->pm_qos_req))
++              pm_qos_remove_request(&mcpdm->pm_qos_req);
++
+       return 0;
+ }
+ 
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 34223c8c28a8..0db2791f7035 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -280,7 +280,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
+       if (rsnd_ssi_is_multi_slave(mod, io))
+               return 0;
+ 
+-      if (ssi->rate) {
++      if (ssi->usrcnt > 1) {
+               if (ssi->rate != rate) {
+                       dev_err(dev, "SSI parent/child should use same rate\n");
+                       return -EINVAL;
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index fee4b0ef5566..42c2a3065b77 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2307,6 +2307,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
+       }
+ 
+       card->instantiated = 1;
++      dapm_mark_endpoints_dirty(card);
+       snd_soc_dapm_sync(&card->dapm);
+       mutex_unlock(&card->mutex);
+       mutex_unlock(&client_mutex);
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 0d1acb704f64..dd4ed7c3c062 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -31,6 +31,8 @@
+ #include "elf.h"
+ #include "warn.h"
+ 
++#define MAX_NAME_LEN 128
++
+ struct section *find_section_by_name(struct elf *elf, const char *name)
+ {
+       struct section *sec;
+@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
+       /* Create parent/child links for any cold subfunctions */
+       list_for_each_entry(sec, &elf->sections, list) {
+               list_for_each_entry(sym, &sec->symbol_list, list) {
++                      char pname[MAX_NAME_LEN + 1];
++                      size_t pnamelen;
+                       if (sym->type != STT_FUNC)
+                               continue;
+                       sym->pfunc = sym->cfunc = sym;
+@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
+                       if (!coldstr)
+                               continue;
+ 
+-                      coldstr[0] = '\0';
+-                      pfunc = find_symbol_by_name(elf, sym->name);
+-                      coldstr[0] = '.';
++                      pnamelen = coldstr - sym->name;
++                      if (pnamelen > MAX_NAME_LEN) {
++                              WARN("%s(): parent function name exceeds maximum length of %d characters",
++                                   sym->name, MAX_NAME_LEN);
++                              return -1;
++                      }
++
++                      strncpy(pname, sym->name, pnamelen);
++                      pname[pnamelen] = '\0';
++                      pfunc = find_symbol_by_name(elf, pname);
+ 
+                       if (!pfunc) {
+                               WARN("%s(): can't find parent function",
+                                    sym->name);
+-                              goto err;
++                              return -1;
+                       }
+ 
+                       sym->pfunc = pfunc;
+diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
+index 1ef0049860a8..eadc7ddacbf6 100644
+--- a/tools/perf/util/namespaces.c
++++ b/tools/perf/util/namespaces.c
+@@ -17,6 +17,7 @@
+ #include <stdio.h>
+ #include <string.h>
+ #include <unistd.h>
++#include <asm/bug.h>
+ 
+ struct namespaces *namespaces__new(struct namespaces_event *event)
+ {
+@@ -185,6 +186,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
+       char curpath[PATH_MAX];
+       int oldns = -1;
+       int newns = -1;
++      char *oldcwd = NULL;
+ 
+       if (nc == NULL)
+               return;
+@@ -198,9 +200,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
+       if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
+               return;
+ 
++      oldcwd = get_current_dir_name();
++      if (!oldcwd)
++              return;
++
+       oldns = open(curpath, O_RDONLY);
+       if (oldns < 0)
+-              return;
++              goto errout;
+ 
+       newns = open(nsi->mntns_path, O_RDONLY);
+       if (newns < 0)
+@@ -209,11 +215,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
+       if (setns(newns, CLONE_NEWNS) < 0)
+               goto errout;
+ 
++      nc->oldcwd = oldcwd;
+       nc->oldns = oldns;
+       nc->newns = newns;
+       return;
+ 
+ errout:
++      free(oldcwd);
+       if (oldns > -1)
+               close(oldns);
+       if (newns > -1)
+@@ -222,11 +230,16 @@ errout:
+ 
+ void nsinfo__mountns_exit(struct nscookie *nc)
+ {
+-      if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
++      if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
+               return;
+ 
+       setns(nc->oldns, CLONE_NEWNS);
+ 
++      if (nc->oldcwd) {
++              WARN_ON_ONCE(chdir(nc->oldcwd));
++              zfree(&nc->oldcwd);
++      }
++
+       if (nc->oldns > -1) {
+               close(nc->oldns);
+               nc->oldns = -1;
+diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
+index 05d82601c9a6..23584a6dd048 100644
+--- a/tools/perf/util/namespaces.h
++++ b/tools/perf/util/namespaces.h
+@@ -36,6 +36,7 @@ struct nsinfo {
+ struct nscookie {
+       int                     oldns;
+       int                     newns;
++      char                    *oldcwd;
+ };
+ 
+ int nsinfo__init(struct nsinfo *nsi);
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index ea300e7818a7..10b89f5b9af7 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -20,6 +20,7 @@ TARGETS += memory-hotplug
+ TARGETS += mount
+ TARGETS += mqueue
+ TARGETS += net
++TARGETS += netfilter
+ TARGETS += nsfs
+ TARGETS += powerpc
+ TARGETS += pstore
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+new file mode 100644
+index 000000000000..47ed6cef93fb
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0
++# Makefile for netfilter selftests
++
++TEST_PROGS := nft_trans_stress.sh
++
++include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
+new file mode 100644
+index 000000000000..1017313e41a8
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/config
+@@ -0,0 +1,2 @@
++CONFIG_NET_NS=y
++NF_TABLES_INET=y
+diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
+new file mode 100755
+index 000000000000..f1affd12c4b1
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
+@@ -0,0 +1,78 @@
++#!/bin/bash
++#
++# This test is for stress-testing the nf_tables config plane path vs.
++# packet path processing: Make sure we never release rules that are
++# still visible to other cpus.
++#
++# set -e
++
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++testns=testns1
++tables="foo bar baz quux"
++
++nft --version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++      echo "SKIP: Could not run test without nft tool"
++      exit $ksft_skip
++fi
++
++ip -Version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++      echo "SKIP: Could not run test without ip tool"
++      exit $ksft_skip
++fi
++
++tmp=$(mktemp)
++
++for table in $tables; do
++      echo add table inet "$table" >> "$tmp"
++      echo flush table inet "$table" >> "$tmp"
++
++      echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
++      echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
++      for c in $(seq 1 400); do
++              chain=$(printf "chain%03u" "$c")
++              echo "add chain inet $table $chain" >> "$tmp"
++      done
++
++      for c in $(seq 1 400); do
++              chain=$(printf "chain%03u" "$c")
++              for BASE in INPUT OUTPUT; do
++                      echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
++              done
++              echo "add rule inet $table $chain counter return" >> "$tmp"
++      done
++done
++
++ip netns add "$testns"
++ip -netns "$testns" link set lo up
++
++lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
++cpunum=$((cpunum-1))
++for i in $(seq 0 $cpunum);do
++      mask=$(printf 0x%x $((1<<$i)))
++        ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
++        ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
++done)
++
++sleep 1
++
++for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
++
++for table in $tables;do
++      randsleep=$((RANDOM%10))
++      sleep $randsleep
++      ip netns exec "$testns" nft delete table inet $table 2>/dev/null
++done
++
++randsleep=$((RANDOM%10))
++sleep $randsleep
++
++pkill -9 ping
++
++wait
++
++rm -f "$tmp"
++ip netns del "$testns"
