commit:     2593a285bdea0026b7eb3ce538e1be91c502d104
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Jul  9 15:07:11 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Jul  9 15:07:11 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2593a285

linux kernel 4.14.54

 0000_README              |    4 +
 1053_linux-4.14.54.patch | 2835 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2839 insertions(+)

diff --git a/0000_README b/0000_README
index de8ec39..6908240 100644
--- a/0000_README
+++ b/0000_README
@@ -255,6 +255,10 @@ Patch:  1052_linux-4.14.53.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.53
 
+Patch:  1053_linux-4.14.54.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.54
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1053_linux-4.14.54.patch b/1053_linux-4.14.54.patch
new file mode 100644
index 0000000..ee2b8c4
--- /dev/null
+++ b/1053_linux-4.14.54.patch
@@ -0,0 +1,2835 @@
+diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
+index 8acf51a4dfa8..47a6a7fe0b86 100644
+--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
++++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
+@@ -10,6 +10,7 @@ Required properties:
+       "brcm,bcm53128"
+       "brcm,bcm5365"
+       "brcm,bcm5395"
++      "brcm,bcm5389"
+       "brcm,bcm5397"
+       "brcm,bcm5398"
+ 
+diff --git a/Makefile b/Makefile
+index fb66998408f4..de0955d8dfa3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
+index 90a741732f60..4747ede61acd 100644
+--- a/arch/arm/boot/dts/imx6q.dtsi
++++ b/arch/arm/boot/dts/imx6q.dtsi
+@@ -96,7 +96,7 @@
+                                       clocks = <&clks IMX6Q_CLK_ECSPI5>,
+                                                <&clks IMX6Q_CLK_ECSPI5>;
+                                       clock-names = "ipg", "per";
+-                                      dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
++                                      dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
+                                       dma-names = "rx", "tx";
+                                       status = "disabled";
+                               };
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index db78d353bab1..191e86c62037 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -45,6 +45,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
+ 
++#include "acpica/accommon.h"
++#include "acpica/acnamesp.h"
+ #include "internal.h"
+ 
+ #define _COMPONENT            ACPI_OS_SERVICES
+@@ -1477,6 +1479,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
+ }
+ EXPORT_SYMBOL(acpi_check_region);
+ 
++static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
++                                            void *_res, void **return_value)
++{
++      struct acpi_mem_space_context **mem_ctx;
++      union acpi_operand_object *handler_obj;
++      union acpi_operand_object *region_obj2;
++      union acpi_operand_object *region_obj;
++      struct resource *res = _res;
++      acpi_status status;
++
++      region_obj = acpi_ns_get_attached_object(handle);
++      if (!region_obj)
++              return AE_OK;
++
++      handler_obj = region_obj->region.handler;
++      if (!handler_obj)
++              return AE_OK;
++
++      if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
++              return AE_OK;
++
++      if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
++              return AE_OK;
++
++      region_obj2 = acpi_ns_get_secondary_object(region_obj);
++      if (!region_obj2)
++              return AE_OK;
++
++      mem_ctx = (void *)&region_obj2->extra.region_context;
++
++      if (!(mem_ctx[0]->address >= res->start &&
++            mem_ctx[0]->address < res->end))
++              return AE_OK;
++
++      status = handler_obj->address_space.setup(region_obj,
++                                                ACPI_REGION_DEACTIVATE,
++                                                NULL, (void **)mem_ctx);
++      if (ACPI_SUCCESS(status))
++              region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
++
++      return status;
++}
++
++/**
++ * acpi_release_memory - Release any mappings done to a memory region
++ * @handle: Handle to namespace node
++ * @res: Memory resource
++ * @level: A level that terminates the search
++ *
++ * Walks through @handle and unmaps all SystemMemory Operation Regions that
++ * overlap with @res and that have already been activated (mapped).
++ *
++ * This is a helper that allows drivers to place special requirements on memory
++ * region that may overlap with operation regions, primarily allowing them to
++ * safely map the region as non-cached memory.
++ *
++ * The unmapped Operation Regions will be automatically remapped next time they
++ * are called, so the drivers do not need to do anything else.
++ */
++acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
++                              u32 level)
++{
++      if (!(res->flags & IORESOURCE_MEM))
++              return AE_TYPE;
++
++      return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
++                                 acpi_deactivate_mem_region, NULL, res, NULL);
++}
++EXPORT_SYMBOL_GPL(acpi_release_memory);
++
+ /*
+  * Let drivers know whether the resource checks are effective
+  */
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index 9c9a22958717..a8d2eb0ceb8d 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
+ }
+ 
+ 
+-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
+-                                 int offset, int swap)
++static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
++                       int swap)
+ {
+       unsigned char buf[ZEPROM_SIZE];
+       struct zatm_dev *zatm_dev;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4d08957d2108..1360a24d2ede 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -747,8 +747,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+       }
+       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+               adev->vram_pin_size += amdgpu_bo_size(bo);
+-              if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+-                      adev->invisible_pin_size += amdgpu_bo_size(bo);
++              adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+       } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+               adev->gart_pin_size += amdgpu_bo_size(bo);
+       }
+@@ -786,8 +785,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ 
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+               adev->vram_pin_size -= amdgpu_bo_size(bo);
+-              if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+-                      adev->invisible_pin_size -= amdgpu_bo_size(bo);
++              adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+               adev->gart_pin_size -= amdgpu_bo_size(bo);
+       }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 43093bffa2cf..557829a84778 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -64,6 +64,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
+ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ 
++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 041e0121590c..308a9755eae3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -85,6 +85,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+       }
+ 
+       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
++      adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+       family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 26e900627971..86d8a961518e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -101,6 +101,22 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+               adev->mc.visible_vram_size : end) - start;
+ }
+ 
++/**
++ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ *
++ * @bo: &amdgpu_bo buffer object (must be in VRAM)
++ *
++ * Returns:
++ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ */
++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++{
++      if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
++              return amdgpu_bo_size(bo);
++
++      return 0;
++}
++
+ /**
+  * amdgpu_vram_mgr_new - allocate new ranges
+  *
+@@ -140,7 +156,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+               num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+       }
+ 
+-      nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
++      nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
++                             GFP_KERNEL | __GFP_ZERO);
+       if (!nodes)
+               return -ENOMEM;
+ 
+@@ -195,7 +212,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+               drm_mm_remove_node(&nodes[i]);
+       spin_unlock(&mgr->lock);
+ 
+-      kfree(nodes);
++      kvfree(nodes);
+       return r == -ENOSPC ? 0 : r;
+ }
+ 
+@@ -234,7 +251,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+       atomic64_sub(usage, &mgr->usage);
+       atomic64_sub(vis_usage, &mgr->vis_usage);
+ 
+-      kfree(mem->mm_node);
++      kvfree(mem->mm_node);
+       mem->mm_node = NULL;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index cf81065e3c5a..5183b46563f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle)
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+       vce_v3_0_override_vce_clock_gating(adev, true);
+-      if (!(adev->flags & AMD_IS_APU))
+-              amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
++
++      amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+ 
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].ready = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 4968b6bb9466..0327e0a6802b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -729,33 +729,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+               return r;
+ 
+       tmp = RREG32_SMC(cntl_reg);
+-      tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+-              CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
++
++      if (adev->flags & AMD_IS_APU)
++              tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
++      else
++              tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
++                              CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+       tmp |= dividers.post_divider;
+       WREG32_SMC(cntl_reg, tmp);
+ 
+       for (i = 0; i < 100; i++) {
+-              if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+-                      break;
++              tmp = RREG32_SMC(status_reg);
++              if (adev->flags & AMD_IS_APU) {
++                      if (tmp & 0x10000)
++                              break;
++              } else {
++                      if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
++                              break;
++              }
+               mdelay(10);
+       }
+       if (i == 100)
+               return -ETIMEDOUT;
+-
+       return 0;
+ }
+ 
++#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
++#define ixGNB_CLK1_STATUS   0xD822010C
++#define ixGNB_CLK2_DFS_CNTL 0xD8220110
++#define ixGNB_CLK2_STATUS   0xD822012C
++#define ixGNB_CLK3_DFS_CNTL 0xD8220130
++#define ixGNB_CLK3_STATUS   0xD822014C
++
+ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+ {
+       int r;
+ 
+-      r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+-      if (r)
+-              return r;
++      if (adev->flags & AMD_IS_APU) {
++              r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
++              if (r)
++                      return r;
+ 
+-      r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+-      if (r)
+-              return r;
++              r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
++              if (r)
++                      return r;
++      } else {
++              r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
++              if (r)
++                      return r;
++
++              r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
++              if (r)
++                      return r;
++      }
+ 
+       return 0;
+ }
+@@ -765,6 +791,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+       int r, i;
+       struct atom_clock_dividers dividers;
+       u32 tmp;
++      u32 reg_ctrl;
++      u32 reg_status;
++      u32 status_mask;
++      u32 reg_mask;
++
++      if (adev->flags & AMD_IS_APU) {
++              reg_ctrl = ixGNB_CLK3_DFS_CNTL;
++              reg_status = ixGNB_CLK3_STATUS;
++              status_mask = 0x00010000;
++              reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
++      } else {
++              reg_ctrl = ixCG_ECLK_CNTL;
++              reg_status = ixCG_ECLK_STATUS;
++              status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
++              reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
++      }
+ 
+       r = amdgpu_atombios_get_clock_dividers(adev,
+                                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+@@ -773,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+               return r;
+ 
+       for (i = 0; i < 100; i++) {
+-              if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
++              if (RREG32_SMC(reg_status) & status_mask)
+                       break;
+               mdelay(10);
+       }
++
+       if (i == 100)
+               return -ETIMEDOUT;
+ 
+-      tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+-      tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+-              CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
++      tmp = RREG32_SMC(reg_ctrl);
++      tmp &= ~reg_mask;
+       tmp |= dividers.post_divider;
+-      WREG32_SMC(ixCG_ECLK_CNTL, tmp);
++      WREG32_SMC(reg_ctrl, tmp);
+ 
+       for (i = 0; i < 100; i++) {
+-              if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
++              if (RREG32_SMC(reg_status) & status_mask)
+                       break;
+               mdelay(10);
+       }
++
+       if (i == 100)
+               return -ETIMEDOUT;
+ 
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index 703c2d13603f..eb7c4cf19bf6 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -889,7 +889,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+               drm_object_attach_property(&plane->base.base,
+                                          props->alpha, 255);
+ 
+-      if (desc->layout.xstride && desc->layout.pstride) {
++      if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
+               int ret;
+ 
+               ret = drm_plane_create_rotation_property(&plane->base,
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 61a2203b75df..be813b2738c1 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2484,12 +2484,17 @@ enum i915_power_well_id {
+ #define _3D_CHICKEN   _MMIO(0x2084)
+ #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB    (1 << 10)
+ #define _3D_CHICKEN2  _MMIO(0x208c)
++
++#define FF_SLICE_CHICKEN      _MMIO(0x2088)
++#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX     (1 << 1)
++
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+  * Required on all Ironlake steppings according to the B-Spec, but the
+  * particular danger of not doing so is not specified.
+  */
+ # define _3D_CHICKEN2_WM_READ_PIPELINED                       (1 << 14)
+ #define _3D_CHICKEN3  _MMIO(0x2090)
++#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX          (1 << 12)
+ #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL           (1 << 10)
+ #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL                (1 << 5)
+ #define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)     ((x)<<1) /* gen8+ */
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 6f972e6ec663..d638b641b760 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1067,11 +1067,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+       /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+       batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ 
++      *batch++ = MI_LOAD_REGISTER_IMM(3);
++
+       /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+-      *batch++ = MI_LOAD_REGISTER_IMM(1);
+       *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
+       *batch++ = _MASKED_BIT_DISABLE(
+                       GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
++
++      /* BSpec: 11391 */
++      *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
++      *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
++
++      /* BSpec: 11299 */
++      *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
++      *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
++
+       *batch++ = MI_NOOP;
+ 
+       /* WaClearSlmSpaceAtContextSwitch:kbl */
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 9a9214ae0fb5..573bab222123 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -630,7 +630,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+       struct qxl_cursor_cmd *cmd;
+       struct qxl_cursor *cursor;
+       struct drm_gem_object *obj;
+-      struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
++      struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
+       int ret;
+       void *user_ptr;
+       int size = 64*64*4;
+@@ -684,7 +684,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+                                                          cursor_bo, 0);
+               cmd->type = QXL_CURSOR_SET;
+ 
+-              qxl_bo_unref(&qcrtc->cursor_bo);
++              old_cursor_bo = qcrtc->cursor_bo;
+               qcrtc->cursor_bo = cursor_bo;
+               cursor_bo = NULL;
+       } else {
+@@ -704,6 +704,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_fence_buffer_objects(release);
+ 
++      if (old_cursor_bo)
++              qxl_bo_unref(&old_cursor_bo);
++
+       qxl_bo_unref(&cursor_bo);
+ 
+       return;
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 33834db7c0a0..38a2ac24428e 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3637,8 +3637,11 @@ static void raid_postsuspend(struct dm_target *ti)
+ {
+       struct raid_set *rs = ti->private;
+ 
+-      if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
++      if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
++              mddev_lock_nointr(&rs->md);
+               mddev_suspend(&rs->md);
++              mddev_unlock(&rs->md);
++      }
+ 
+       rs->md.ro = 1;
+ }
+@@ -3898,8 +3901,11 @@ static void raid_resume(struct dm_target *ti)
+       if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ 
+-      if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
++      if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
++              mddev_lock_nointr(mddev);
+               mddev_resume(mddev);
++              mddev_unlock(mddev);
++      }
+ }
+ 
+ static struct target_type raid_target = {
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 03082e17c65c..72ce0bccc865 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -442,10 +442,11 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+ static void remove_suspend_info(struct mddev *mddev, int slot)
+ {
+       struct md_cluster_info *cinfo = mddev->cluster_info;
++      mddev->pers->quiesce(mddev, 1);
+       spin_lock_irq(&cinfo->suspend_lock);
+       __remove_suspend_info(cinfo, slot);
+       spin_unlock_irq(&cinfo->suspend_lock);
+-      mddev->pers->quiesce(mddev, 2);
++      mddev->pers->quiesce(mddev, 0);
+ }
+ 
+ 
+@@ -492,13 +493,12 @@ static void process_suspend_info(struct mddev *mddev,
+       s->lo = lo;
+       s->hi = hi;
+       mddev->pers->quiesce(mddev, 1);
+-      mddev->pers->quiesce(mddev, 0);
+       spin_lock_irq(&cinfo->suspend_lock);
+       /* Remove existing entry (if exists) before adding */
+       __remove_suspend_info(cinfo, slot);
+       list_add(&s->list, &cinfo->suspend_list);
+       spin_unlock_irq(&cinfo->suspend_lock);
+-      mddev->pers->quiesce(mddev, 2);
++      mddev->pers->quiesce(mddev, 0);
+ }
+ 
+ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7143c8b9284b..11a67eac55b1 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
+  * call has finished, the bio has been linked into some internal structure
+  * and so is visible to ->quiesce(), so we don't need the refcount any more.
+  */
++static bool is_suspended(struct mddev *mddev, struct bio *bio)
++{
++      if (mddev->suspended)
++              return true;
++      if (bio_data_dir(bio) != WRITE)
++              return false;
++      if (mddev->suspend_lo >= mddev->suspend_hi)
++              return false;
++      if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
++              return false;
++      if (bio_end_sector(bio) < mddev->suspend_lo)
++              return false;
++      return true;
++}
++
+ void md_handle_request(struct mddev *mddev, struct bio *bio)
+ {
+ check_suspended:
+       rcu_read_lock();
+-      if (mddev->suspended) {
++      if (is_suspended(mddev, bio)) {
+               DEFINE_WAIT(__wait);
+               for (;;) {
+                       prepare_to_wait(&mddev->sb_wait, &__wait,
+                                       TASK_UNINTERRUPTIBLE);
+-                      if (!mddev->suspended)
++                      if (!is_suspended(mddev, bio))
+                               break;
+                       rcu_read_unlock();
+                       schedule();
+@@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ void mddev_suspend(struct mddev *mddev)
+ {
+       WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
++      lockdep_assert_held(&mddev->reconfig_mutex);
+       if (mddev->suspended++)
+               return;
+       synchronize_rcu();
+       wake_up(&mddev->sb_wait);
++      set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
++      smp_mb__after_atomic();
+       wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
+       mddev->pers->quiesce(mddev, 1);
++      clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
++      wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
+ 
+       del_timer_sync(&mddev->safemode_timer);
+ }
+@@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
+ 
+ void mddev_resume(struct mddev *mddev)
+ {
++      lockdep_assert_held(&mddev->reconfig_mutex);
+       if (--mddev->suspended)
+               return;
+       wake_up(&mddev->sb_wait);
+@@ -663,6 +684,7 @@ void mddev_unlock(struct mddev *mddev)
+        */
+       spin_lock(&pers_lock);
+       md_wakeup_thread(mddev->thread);
++      wake_up(&mddev->sb_wait);
+       spin_unlock(&pers_lock);
+ }
+ EXPORT_SYMBOL_GPL(mddev_unlock);
+@@ -4828,7 +4850,7 @@ suspend_lo_show(struct mddev *mddev, char *page)
+ static ssize_t
+ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
+ {
+-      unsigned long long old, new;
++      unsigned long long new;
+       int err;
+ 
+       err = kstrtoull(buf, 10, &new);
+@@ -4844,16 +4866,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
+       if (mddev->pers == NULL ||
+           mddev->pers->quiesce == NULL)
+               goto unlock;
+-      old = mddev->suspend_lo;
++      mddev_suspend(mddev);
+       mddev->suspend_lo = new;
+-      if (new >= old)
+-              /* Shrinking suspended region */
+-              mddev->pers->quiesce(mddev, 2);
+-      else {
+-              /* Expanding suspended region - need to wait */
+-              mddev->pers->quiesce(mddev, 1);
+-              mddev->pers->quiesce(mddev, 0);
+-      }
++      mddev_resume(mddev);
++
+       err = 0;
+ unlock:
+       mddev_unlock(mddev);
+@@ -4871,7 +4887,7 @@ suspend_hi_show(struct mddev *mddev, char *page)
+ static ssize_t
+ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
+ {
+-      unsigned long long old, new;
++      unsigned long long new;
+       int err;
+ 
+       err = kstrtoull(buf, 10, &new);
+@@ -4884,19 +4900,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
+       if (err)
+               return err;
+       err = -EINVAL;
+-      if (mddev->pers == NULL ||
+-          mddev->pers->quiesce == NULL)
++      if (mddev->pers == NULL)
+               goto unlock;
+-      old = mddev->suspend_hi;
++
++      mddev_suspend(mddev);
+       mddev->suspend_hi = new;
+-      if (new <= old)
+-              /* Shrinking suspended region */
+-              mddev->pers->quiesce(mddev, 2);
+-      else {
+-              /* Expanding suspended region - need to wait */
+-              mddev->pers->quiesce(mddev, 1);
+-              mddev->pers->quiesce(mddev, 0);
+-      }
++      mddev_resume(mddev);
++
+       err = 0;
+ unlock:
+       mddev_unlock(mddev);
+@@ -6642,22 +6652,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
+               return -ENOENT; /* cannot remove what isn't there */
+       err = 0;
+       if (mddev->pers) {
+-              mddev->pers->quiesce(mddev, 1);
+               if (fd >= 0) {
+                       struct bitmap *bitmap;
+ 
+                       bitmap = bitmap_create(mddev, -1);
++                      mddev_suspend(mddev);
+                       if (!IS_ERR(bitmap)) {
+                               mddev->bitmap = bitmap;
+                               err = bitmap_load(mddev);
+                       } else
+                               err = PTR_ERR(bitmap);
+-              }
+-              if (fd < 0 || err) {
++                      if (err) {
++                              bitmap_destroy(mddev);
++                              fd = -1;
++                      }
++                      mddev_resume(mddev);
++              } else if (fd < 0) {
++                      mddev_suspend(mddev);
+                       bitmap_destroy(mddev);
+-                      fd = -1; /* make sure to put the file */
++                      mddev_resume(mddev);
+               }
+-              mddev->pers->quiesce(mddev, 0);
+       }
+       if (fd < 0) {
+               struct file *f = mddev->bitmap_info.file;
+@@ -6941,8 +6955,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
+                               mddev->bitmap_info.default_offset;
+                       mddev->bitmap_info.space =
+                               mddev->bitmap_info.default_space;
+-                      mddev->pers->quiesce(mddev, 1);
+                       bitmap = bitmap_create(mddev, -1);
++                      mddev_suspend(mddev);
+                       if (!IS_ERR(bitmap)) {
+                               mddev->bitmap = bitmap;
+                               rv = bitmap_load(mddev);
+@@ -6950,7 +6964,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
+                               rv = PTR_ERR(bitmap);
+                       if (rv)
+                               bitmap_destroy(mddev);
+-                      mddev->pers->quiesce(mddev, 0);
++                      mddev_resume(mddev);
+               } else {
+                       /* remove the bitmap */
+                       if (!mddev->bitmap) {
+@@ -6973,9 +6987,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
+                               mddev->bitmap_info.nodes = 0;
+                               md_cluster_ops->leave(mddev);
+                       }
+-                      mddev->pers->quiesce(mddev, 1);
++                      mddev_suspend(mddev);
+                       bitmap_destroy(mddev);
+-                      mddev->pers->quiesce(mddev, 0);
++                      mddev_resume(mddev);
+                       mddev->bitmap_info.offset = 0;
+               }
+       }
+@@ -8858,6 +8872,16 @@ void md_check_recovery(struct mddev *mddev)
+       unlock:
+               wake_up(&mddev->sb_wait);
+               mddev_unlock(mddev);
++      } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
++              /* Write superblock - thread that called mddev_suspend()
++               * holds reconfig_mutex for us.
++               */
++              set_bit(MD_UPDATING_SB, &mddev->flags);
++              smp_mb__after_atomic();
++              if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
++                      md_update_sb(mddev, 0);
++              clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
++              wake_up(&mddev->sb_wait);
+       }
+ }
+ EXPORT_SYMBOL(md_check_recovery);
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 9b0a896890ef..11696aba94e3 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -237,6 +237,12 @@ enum mddev_flags {
+                                */
+       MD_HAS_PPL,             /* The raid array has PPL feature set */
+      MD_HAS_MULTIPLE_PPLS,   /* The raid array has multiple PPLs feature set */
++      MD_ALLOW_SB_UPDATE,     /* md_check_recovery is allowed to update
++                               * the metadata without taking reconfig_mutex.
++                               */
++      MD_UPDATING_SB,         /* md_check_recovery is updating the metadata
++                               * without explicitly holding reconfig_mutex.
++                               */
+ };
+ 
+ enum mddev_sb_flags {
+@@ -540,12 +546,11 @@ struct md_personality
+       int (*check_reshape) (struct mddev *mddev);
+       int (*start_reshape) (struct mddev *mddev);
+       void (*finish_reshape) (struct mddev *mddev);
+-      /* quiesce moves between quiescence states
+-       * 0 - fully active
+-       * 1 - no new requests allowed
+-       * others - reserved
++      /* quiesce suspends or resumes internal processing.
++       * 1 - stop new actions and wait for action io to complete
++       * 0 - return to normal behaviour
+        */
+-      void (*quiesce) (struct mddev *mddev, int state);
++      void (*quiesce) (struct mddev *mddev, int quiesce);
+       /* takeover is used to transition an array from one
+        * personality to another.  The new personality must be able
+        * to handle the data in the current layout.
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 5a00fc118470..5ecba9eef441 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -768,7 +768,7 @@ static void *raid0_takeover(struct mddev *mddev)
+       return ERR_PTR(-EINVAL);
+ }
+ 
+-static void raid0_quiesce(struct mddev *mddev, int state)
++static void raid0_quiesce(struct mddev *mddev, int quiesce)
+ {
+ }
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index e4e01d3bab81..029ecba60727 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1298,11 +1298,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+        */
+ 
+ 
+-      if ((bio_end_sector(bio) > mddev->suspend_lo &&
+-          bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+-          (mddev_is_clustered(mddev) &&
++      if (mddev_is_clustered(mddev) &&
+            md_cluster_ops->area_resyncing(mddev, WRITE,
+-                   bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
++                   bio->bi_iter.bi_sector, bio_end_sector(bio))) {
+ 
+               /*
+                * As the suspend_* range is controlled by userspace, we want
+@@ -1313,12 +1311,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+                       sigset_t full, old;
+                       prepare_to_wait(&conf->wait_barrier,
+                                       &w, TASK_INTERRUPTIBLE);
+-                      if ((bio_end_sector(bio) <= mddev->suspend_lo ||
+-                           bio->bi_iter.bi_sector >= mddev->suspend_hi) &&
+-                          (!mddev_is_clustered(mddev) ||
+-                           !md_cluster_ops->area_resyncing(mddev, WRITE,
++                      if (!mddev_is_clustered(mddev) ||
++                          !md_cluster_ops->area_resyncing(mddev, WRITE,
+                                                       bio->bi_iter.bi_sector,
+-                                                      bio_end_sector(bio))))
++                                                      bio_end_sector(bio)))
+                               break;
+                       sigfillset(&full);
+                       sigprocmask(SIG_BLOCK, &full, &old);
+@@ -3280,21 +3276,14 @@ static int raid1_reshape(struct mddev *mddev)
+       return 0;
+ }
+ 
+-static void raid1_quiesce(struct mddev *mddev, int state)
++static void raid1_quiesce(struct mddev *mddev, int quiesce)
+ {
+       struct r1conf *conf = mddev->private;
+ 
+-      switch(state) {
+-      case 2: /* wake for suspend */
+-              wake_up(&conf->wait_barrier);
+-              break;
+-      case 1:
++      if (quiesce)
+               freeze_array(conf, 0);
+-              break;
+-      case 0:
++      else
+               unfreeze_array(conf);
+-              break;
+-      }
+ }
+ 
+ static void *raid1_takeover(struct mddev *mddev)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 5fb31ef52945..b20c23f970f4 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3838,18 +3838,14 @@ static void raid10_free(struct mddev *mddev, void *priv)
+       kfree(conf);
+ }
+ 
+-static void raid10_quiesce(struct mddev *mddev, int state)
++static void raid10_quiesce(struct mddev *mddev, int quiesce)
+ {
+       struct r10conf *conf = mddev->private;
+ 
+-      switch(state) {
+-      case 1:
++      if (quiesce)
+               raise_barrier(conf, 0);
+-              break;
+-      case 0:
++      else
+               lower_barrier(conf);
+-              break;
+-      }
+ }
+ 
+ static int raid10_resize(struct mddev *mddev, sector_t sectors)
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index 9a340728b846..0d535b40cb3b 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -693,6 +693,8 @@ static void r5c_disable_writeback_async(struct work_struct *work)
+       struct r5l_log *log = container_of(work, struct r5l_log,
+                                          disable_writeback_work);
+       struct mddev *mddev = log->rdev->mddev;
++      struct r5conf *conf = mddev->private;
++      int locked = 0;
+ 
+       if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+               return;
+@@ -701,11 +703,15 @@ static void r5c_disable_writeback_async(struct work_struct *work)
+ 
+       /* wait superblock change before suspend */
+       wait_event(mddev->sb_wait,
+-                 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+-
+-      mddev_suspend(mddev);
+-      log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+-      mddev_resume(mddev);
++                 conf->log == NULL ||
++                 (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
++                  (locked = mddev_trylock(mddev))));
++      if (locked) {
++              mddev_suspend(mddev);
++              log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
++              mddev_resume(mddev);
++              mddev_unlock(mddev);
++      }
+ }
+ 
+ static void r5l_submit_current_io(struct r5l_log *log)
+@@ -1583,21 +1589,21 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+       md_wakeup_thread(log->reclaim_thread);
+ }
+ 
+-void r5l_quiesce(struct r5l_log *log, int state)
++void r5l_quiesce(struct r5l_log *log, int quiesce)
+ {
+       struct mddev *mddev;
+-      if (!log || state == 2)
++      if (!log)
+               return;
+-      if (state == 0)
+-              kthread_unpark(log->reclaim_thread->tsk);
+-      else if (state == 1) {
++
++      if (quiesce) {
+               /* make sure r5l_write_super_and_discard_space exits */
+               mddev = log->rdev->mddev;
+               wake_up(&mddev->sb_wait);
+               kthread_park(log->reclaim_thread->tsk);
+               r5l_wake_reclaim(log, MaxSector);
+               r5l_do_reclaim(log);
+-      }
++      } else
++              kthread_unpark(log->reclaim_thread->tsk);
+ }
+ 
+ bool r5l_log_disk_error(struct r5conf *conf)
+@@ -3161,6 +3167,8 @@ void r5l_exit_log(struct r5conf *conf)
+       conf->log = NULL;
+       synchronize_rcu();
+ 
++      /* Ensure disable_writeback_work wakes up and exits */
++      wake_up(&conf->mddev->sb_wait);
+       flush_work(&log->disable_writeback_work);
+       md_unregister_thread(&log->reclaim_thread);
+       mempool_destroy(log->meta_pool);
+diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
+index 7f9ad5f7cda0..284578b0a349 100644
+--- a/drivers/md/raid5-log.h
++++ b/drivers/md/raid5-log.h
+@@ -9,7 +9,7 @@ extern void r5l_write_stripe_run(struct r5l_log *log);
+ extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
+ extern void r5l_stripe_write_finished(struct stripe_head *sh);
+ extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+-extern void r5l_quiesce(struct r5l_log *log, int state);
++extern void r5l_quiesce(struct r5l_log *log, int quiesce);
+ extern bool r5l_log_disk_error(struct r5conf *conf);
+ extern bool r5c_is_writeback(struct r5l_log *log);
+ extern int
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index de1ef6264ee7..07ca2fd10189 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5686,28 +5686,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+                               goto retry;
+                       }
+ 
+-                      if (rw == WRITE &&
+-                          logical_sector >= mddev->suspend_lo &&
+-                          logical_sector < mddev->suspend_hi) {
+-                              raid5_release_stripe(sh);
+-                              /* As the suspend_* range is controlled by
+-                               * userspace, we want an interruptible
+-                               * wait.
+-                               */
+-                              prepare_to_wait(&conf->wait_for_overlap,
+-                                              &w, TASK_INTERRUPTIBLE);
+-                              if (logical_sector >= mddev->suspend_lo &&
+-                                  logical_sector < mddev->suspend_hi) {
+-                                      sigset_t full, old;
+-                                      sigfillset(&full);
+-                                      sigprocmask(SIG_BLOCK, &full, &old);
+-                                      schedule();
+-                                      sigprocmask(SIG_SETMASK, &old, NULL);
+-                                      do_prepare = true;
+-                              }
+-                              goto retry;
+-                      }
+-
+                       if (test_bit(STRIPE_EXPANDING, &sh->state) ||
+                           !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
+                               /* Stripe is busy expanding or
+@@ -8025,16 +8003,12 @@ static void raid5_finish_reshape(struct mddev *mddev)
+       }
+ }
+ 
+-static void raid5_quiesce(struct mddev *mddev, int state)
++static void raid5_quiesce(struct mddev *mddev, int quiesce)
+ {
+       struct r5conf *conf = mddev->private;
+ 
+-      switch(state) {
+-      case 2: /* resume for a suspend */
+-              wake_up(&conf->wait_for_overlap);
+-              break;
+-
+-      case 1: /* stop all writes */
++      if (quiesce) {
++              /* stop all writes */
+               lock_all_device_hash_locks_irq(conf);
+               /* '2' tells resync/reshape to pause so that all
+                * active stripes can drain
+@@ -8050,17 +8024,15 @@ static void raid5_quiesce(struct mddev *mddev, int state)
+               unlock_all_device_hash_locks_irq(conf);
+               /* allow reshape to continue */
+               wake_up(&conf->wait_for_overlap);
+-              break;
+-
+-      case 0: /* re-enable writes */
++      } else {
++              /* re-enable writes */
+               lock_all_device_hash_locks_irq(conf);
+               conf->quiesce = 0;
+               wake_up(&conf->wait_for_quiescent);
+               wake_up(&conf->wait_for_overlap);
+               unlock_all_device_hash_locks_irq(conf);
+-              break;
+       }
+-      r5l_quiesce(conf->log, state);
++      r5l_quiesce(conf->log, quiesce);
+ }
+ 
+ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 528e04f96c13..d410de331854 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+ 
+       for (; page < page_end; page++) {
+               res = chip->ecc.read_oob(mtd, chip, page);
+-              if (res)
++              if (res < 0)
+                       return res;
+ 
+               bad = chip->oob_poi[chip->badblockpos];
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 274f3679f33d..acf64d4cd94c 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1549,6 +1549,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .cpu_port = B53_CPU_PORT_25,
+               .duplex_reg = B53_DUPLEX_STAT_FE,
+       },
++      {
++              .chip_id = BCM5389_DEVICE_ID,
++              .dev_name = "BCM5389",
++              .vlans = 4096,
++              .enabled_ports = 0x1f,
++              .arl_entries = 4,
++              .cpu_port = B53_CPU_PORT,
++              .vta_regs = B53_VTA_REGS,
++              .duplex_reg = B53_DUPLEX_STAT_GE,
++              .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
++              .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
++      },
+       {
+               .chip_id = BCM5395_DEVICE_ID,
+               .dev_name = "BCM5395",
+@@ -1872,6 +1884,7 @@ int b53_switch_detect(struct b53_device *dev)
+               else
+                       dev->chip_id = BCM5365_DEVICE_ID;
+               break;
++      case BCM5389_DEVICE_ID:
+       case BCM5395_DEVICE_ID:
+       case BCM5397_DEVICE_ID:
+       case BCM5398_DEVICE_ID:
+diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
+index fa7556f5d4fb..a533a90e3904 100644
+--- a/drivers/net/dsa/b53/b53_mdio.c
++++ b/drivers/net/dsa/b53/b53_mdio.c
+@@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
+ #define B53_BRCM_OUI_1        0x0143bc00
+ #define B53_BRCM_OUI_2        0x03625c00
+ #define B53_BRCM_OUI_3        0x00406000
++#define B53_BRCM_OUI_4        0x01410c00
+ 
+ static int b53_mdio_probe(struct mdio_device *mdiodev)
+ {
+@@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
+        */
+       if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
+           (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
+-          (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
++          (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
++          (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
+               dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
+               return -ENODEV;
+       }
+@@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
+       { .compatible = "brcm,bcm53125" },
+       { .compatible = "brcm,bcm53128" },
+       { .compatible = "brcm,bcm5365" },
++      { .compatible = "brcm,bcm5389" },
+       { .compatible = "brcm,bcm5395" },
+       { .compatible = "brcm,bcm5397" },
+       { .compatible = "brcm,bcm5398" },
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 01bd8cbe9a3f..6b9e39ddaec1 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -48,6 +48,7 @@ struct b53_io_ops {
+ enum {
+       BCM5325_DEVICE_ID = 0x25,
+       BCM5365_DEVICE_ID = 0x65,
++      BCM5389_DEVICE_ID = 0x89,
+       BCM5395_DEVICE_ID = 0x95,
+       BCM5397_DEVICE_ID = 0x97,
+       BCM5398_DEVICE_ID = 0x98,
+diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
+index 612c7a44b26c..23821540ab07 100644
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev)
+       for (i = 0; i < SONIC_NUM_RRS; i++) {
+              dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
+                                                SONIC_RBSIZE, DMA_FROM_DEVICE);
+-              if (!laddr) {
++              if (dma_mapping_error(lp->device, laddr)) {
+                      while(i > 0) { /* free any that were mapped successfully */
+                               i--;
+                              dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 8e06f308ce44..b23ee948e7c9 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
++      {QMI_FIXED_INTF(0x0846, 0x68d3, 8)},    /* Netgear Aircard 779S */
+       {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
+       {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
+       {QMI_FIXED_INTF(0x1435, 0xd181, 3)},    /* Wistron NeWeb D18Q1 */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 12a9b86d71ea..dffa697d71e0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1499,14 +1499,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+                                       struct iwl_trans *trans)
+ {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-      int max_irqs, num_irqs, i, ret, nr_online_cpus;
++      int max_irqs, num_irqs, i, ret;
+       u16 pci_cmd;
+ 
+       if (!trans->cfg->mq_rx_supported)
+               goto enable_msi;
+ 
+-      nr_online_cpus = num_online_cpus();
+-      max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
++      max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
+       for (i = 0; i < max_irqs; i++)
+               trans_pcie->msix_entries[i].entry = i;
+ 
+@@ -1532,16 +1531,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+        * Two interrupts less: non rx causes shared with FBQ and RSS.
+        * More than two interrupts: we will use fewer RSS queues.
+        */
+-      if (num_irqs <= nr_online_cpus) {
++      if (num_irqs <= max_irqs - 2) {
+               trans_pcie->trans->num_rx_queues = num_irqs + 1;
+               trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+                       IWL_SHARED_IRQ_FIRST_RSS;
+-      } else if (num_irqs == nr_online_cpus + 1) {
++      } else if (num_irqs == max_irqs - 1) {
+               trans_pcie->trans->num_rx_queues = num_irqs;
+               trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+       } else {
+               trans_pcie->trans->num_rx_queues = num_irqs - 1;
+       }
++      WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
+ 
+       trans_pcie->alloc_vecs = num_irqs;
+       trans_pcie->msix_enabled = true;
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 48e1541dc8d4..7440f650e81a 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -161,6 +161,16 @@ MODULE_LICENSE("GPL");
+ 
+ static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+ 
++static bool ashs_present(void)
++{
++      int i = 0;
++      while (ashs_ids[i]) {
++              if (acpi_dev_found(ashs_ids[i++]))
++                      return true;
++      }
++      return false;
++}
++
+ struct bios_args {
+       u32 arg0;
+       u32 arg1;
+@@ -962,6 +972,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
+ 
+ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
+ {
++      if (asus->driver->wlan_ctrl_by_user && ashs_present())
++              return;
++
+       asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
+       asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
+       asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
+@@ -2058,16 +2071,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
+       return 0;
+ }
+ 
+-static bool ashs_present(void)
+-{
+-      int i = 0;
+-      while (ashs_ids[i]) {
+-              if (acpi_dev_found(ashs_ids[i++]))
+-                      return true;
+-      }
+-      return false;
+-}
+-
+ /*
+  * WMI Driver
+  */
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index e67c1d8a193d..d072f84a8535 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3049,7 +3049,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+       cqr->callback_data = req;
+       cqr->status = DASD_CQR_FILLED;
+       cqr->dq = dq;
+-      req->completion_data = cqr;
++      *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
++
+       blk_mq_start_request(req);
+       spin_lock(&block->queue_lock);
+       list_add_tail(&cqr->blocklist, &block->ccw_queue);
+@@ -3073,12 +3074,13 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+  */
+ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+ {
+-      struct dasd_ccw_req *cqr = req->completion_data;
+       struct dasd_block *block = req->q->queuedata;
+       struct dasd_device *device;
++      struct dasd_ccw_req *cqr;
+       unsigned long flags;
+       int rc = 0;
+ 
++      cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       if (!cqr)
+               return BLK_EH_NOT_HANDLED;
+ 
+@@ -3184,6 +3186,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
+       int rc;
+ 
+       block->tag_set.ops = &dasd_mq_ops;
++      block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
+       block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
+       block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+       block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
+index 91faa7f035b9..babbd94c32d9 100644
+--- a/drivers/staging/android/ion/ion_heap.c
++++ b/drivers/staging/android/ion/ion_heap.c
+@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
+       struct page **tmp = pages;
+ 
+       if (!pages)
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+ 
+       if (buffer->flags & ION_FLAG_CACHED)
+               pgprot = PAGE_KERNEL;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 1c70541a1467..0475f9685a41 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -126,6 +126,8 @@ struct n_tty_data {
+       struct mutex output_lock;
+ };
+ 
++#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
++
+ static inline size_t read_cnt(struct n_tty_data *ldata)
+ {
+       return ldata->read_head - ldata->read_tail;
+@@ -143,6 +145,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
+ 
+ static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
+ {
++      smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
+       return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+ }
+ 
+@@ -318,9 +321,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+ static void reset_buffer_flags(struct n_tty_data *ldata)
+ {
+       ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
+-      ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+       ldata->commit_head = 0;
+-      ldata->echo_mark = 0;
+       ldata->line_start = 0;
+ 
+       ldata->erasing = 0;
+@@ -619,12 +620,19 @@ static size_t __process_echoes(struct tty_struct *tty)
+       old_space = space = tty_write_room(tty);
+ 
+       tail = ldata->echo_tail;
+-      while (ldata->echo_commit != tail) {
++      while (MASK(ldata->echo_commit) != MASK(tail)) {
+               c = echo_buf(ldata, tail);
+               if (c == ECHO_OP_START) {
+                       unsigned char op;
+                       int no_space_left = 0;
+ 
++                      /*
++                       * Since add_echo_byte() is called without holding
++                       * output_lock, we might see only portion of multi-byte
++                       * operation.
++                       */
++                      if (MASK(ldata->echo_commit) == MASK(tail + 1))
++                              goto not_yet_stored;
+                       /*
+                        * If the buffer byte is the start of a multi-byte
+                        * operation, get the next byte, which is either the
+@@ -636,6 +644,8 @@ static size_t __process_echoes(struct tty_struct *tty)
+                               unsigned int num_chars, num_bs;
+ 
+                       case ECHO_OP_ERASE_TAB:
++                              if (MASK(ldata->echo_commit) == MASK(tail + 2))
++                                      goto not_yet_stored;
+                               num_chars = echo_buf(ldata, tail + 2);
+ 
+                               /*
+@@ -730,7 +740,8 @@ static size_t __process_echoes(struct tty_struct *tty)
+       /* If the echo buffer is nearly full (so that the possibility exists
+        * of echo overrun before the next commit), then discard enough
+        * data at the tail to prevent a subsequent overrun */
+-      while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
++      while (ldata->echo_commit > tail &&
++             ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+               if (echo_buf(ldata, tail) == ECHO_OP_START) {
+                       if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
+                               tail += 3;
+@@ -740,6 +751,7 @@ static size_t __process_echoes(struct tty_struct *tty)
+                       tail++;
+       }
+ 
++ not_yet_stored:
+       ldata->echo_tail = tail;
+       return old_space - space;
+ }
+@@ -750,6 +762,7 @@ static void commit_echoes(struct tty_struct *tty)
+       size_t nr, old, echoed;
+       size_t head;
+ 
++      mutex_lock(&ldata->output_lock);
+       head = ldata->echo_head;
+       ldata->echo_mark = head;
+       old = ldata->echo_commit - ldata->echo_tail;
+@@ -758,10 +771,12 @@ static void commit_echoes(struct tty_struct *tty)
+        * is over the threshold (and try again each time another
+        * block is accumulated) */
+       nr = head - ldata->echo_tail;
+-      if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
++      if (nr < ECHO_COMMIT_WATERMARK ||
++          (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
++              mutex_unlock(&ldata->output_lock);
+               return;
++      }
+ 
+-      mutex_lock(&ldata->output_lock);
+       ldata->echo_commit = head;
+       echoed = __process_echoes(tty);
+       mutex_unlock(&ldata->output_lock);
+@@ -812,7 +827,9 @@ static void flush_echoes(struct tty_struct *tty)
+ 
+ static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
+ {
+-      *echo_buf_addr(ldata, ldata->echo_head++) = c;
++      *echo_buf_addr(ldata, ldata->echo_head) = c;
++      smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
++      ldata->echo_head++;
+ }
+ 
+ /**
+@@ -980,14 +997,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
+       }
+ 
+       seen_alnums = 0;
+-      while (ldata->read_head != ldata->canon_head) {
++      while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
+               head = ldata->read_head;
+ 
+               /* erase a single possibly multibyte character */
+               do {
+                       head--;
+                       c = read_buf(ldata, head);
+-              } while (is_continuation(c, tty) && head != ldata->canon_head);
++              } while (is_continuation(c, tty) &&
++                       MASK(head) != MASK(ldata->canon_head));
+ 
+               /* do not partially erase */
+               if (is_continuation(c, tty))
+@@ -1029,7 +1047,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
+                                * This info is used to go back the correct
+                                * number of columns.
+                                */
+-                              while (tail != ldata->canon_head) {
++                              while (MASK(tail) != MASK(ldata->canon_head)) {
+                                       tail--;
+                                       c = read_buf(ldata, tail);
+                                       if (c == '\t') {
+@@ -1304,7 +1322,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+                       finish_erasing(ldata);
+                       echo_char(c, tty);
+                       echo_char_raw('\n', ldata);
+-                      while (tail != ldata->read_head) {
++                      while (MASK(tail) != MASK(ldata->read_head)) {
+                               echo_char(read_buf(ldata, tail), tty);
+                               tail++;
+                       }
+@@ -1880,30 +1898,21 @@ static int n_tty_open(struct tty_struct *tty)
+       struct n_tty_data *ldata;
+ 
+       /* Currently a malloc failure here can panic */
+-      ldata = vmalloc(sizeof(*ldata));
++      ldata = vzalloc(sizeof(*ldata));
+       if (!ldata)
+-              goto err;
++              return -ENOMEM;
+ 
+       ldata->overrun_time = jiffies;
+       mutex_init(&ldata->atomic_read_lock);
+       mutex_init(&ldata->output_lock);
+ 
+       tty->disc_data = ldata;
+-      reset_buffer_flags(tty->disc_data);
+-      ldata->column = 0;
+-      ldata->canon_column = 0;
+-      ldata->num_overrun = 0;
+-      ldata->no_room = 0;
+-      ldata->lnext = 0;
+       tty->closing = 0;
+       /* indicate buffer work may resume */
+       clear_bit(TTY_LDISC_HALTED, &tty->flags);
+       n_tty_set_termios(tty, NULL);
+       tty_unthrottle(tty);
+-
+       return 0;
+-err:
+-      return -ENOMEM;
+ }
+ 
+ static inline int input_available_p(struct tty_struct *tty, int poll)
+@@ -2413,7 +2422,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
+       tail = ldata->read_tail;
+       nr = head - tail;
+       /* Skip EOF-chars.. */
+-      while (head != tail) {
++      while (MASK(head) != MASK(tail)) {
+               if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
+                   read_buf(ldata, tail) == __DISABLED_CHAR)
+                       nr--;
+diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
+index 97db76afced2..ae2564ecddcd 100644
+--- a/drivers/tty/serdev/core.c
++++ b/drivers/tty/serdev/core.c
+@@ -482,6 +482,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
+ static void __exit serdev_exit(void)
+ {
+       bus_unregister(&serdev_bus_type);
++      ida_destroy(&ctrl_ida);
+ }
+ module_exit(serdev_exit);
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 0d814a87acb2..4986b4aebe80 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -3345,9 +3345,7 @@ static const struct pci_device_id blacklist[] = {
+       /* multi-io cards handled by parport_serial */
+       { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
+       { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+-      { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
+       { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+-      { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
+ 
+       /* Moxa Smartio MUE boards handled by 8250_moxa */
+       { PCI_VDEVICE(MOXA, 0x1024), },
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index de67abbda921..e77421e7bf46 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -782,7 +782,7 @@ int vc_allocate(unsigned int currcons)     /* return 0 on success */
+       if (!*vc->vc_uni_pagedir_loc)
+               con_set_default_unimap(vc);
+ 
+-      vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
++      vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+       if (!vc->vc_screenbuf)
+               goto err_free;
+ 
+@@ -869,7 +869,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ 
+       if (new_screen_size > (4 << 20))
+               return -EINVAL;
+-      newscreen = kmalloc(new_screen_size, GFP_USER);
++      newscreen = kzalloc(new_screen_size, GFP_USER);
+       if (!newscreen)
+               return -ENOMEM;
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 22952d70b981..3b9aadd007f5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1771,6 +1771,9 @@ static const struct usb_device_id acm_ids[] = {
+       { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+       .driver_info = SINGLE_RX_URB,
+       },
++      { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
++      .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
++      },
+       { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
+diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
+index 3ae8b1bbaa55..7f51a77bc5cc 100644
+--- a/drivers/usb/dwc2/hcd_queue.c
++++ b/drivers/usb/dwc2/hcd_queue.c
+@@ -379,7 +379,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+       /* Get the map and adjust if this is a multi_tt hub */
+       map = qh->dwc_tt->periodic_bitmaps;
+       if (qh->dwc_tt->usb_tt->multi)
+-              map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
++              map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
+ 
+       return map;
+ }
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index efd7e4882d66..00b710016d21 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -891,12 +891,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ 
+       dev = xhci->devs[slot_id];
+ 
+-      trace_xhci_free_virt_device(dev);
+-
+       xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+       if (!dev)
+               return;
+ 
++      trace_xhci_free_virt_device(dev);
++
+       if (dev->tt_info)
+               old_active_eps = dev->tt_info->active_eps;
+ 
+diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
+index f20753b99624..02a1164ca599 100644
+--- a/drivers/usb/host/xhci-trace.h
++++ b/drivers/usb/host/xhci-trace.h
+@@ -158,6 +158,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
+       TP_ARGS(ring, trb)
+ );
+ 
++DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
++      TP_PROTO(struct xhci_virt_device *vdev),
++      TP_ARGS(vdev),
++      TP_STRUCT__entry(
++              __field(void *, vdev)
++              __field(unsigned long long, out_ctx)
++              __field(unsigned long long, in_ctx)
++              __field(u8, fake_port)
++              __field(u8, real_port)
++              __field(u16, current_mel)
++
++      ),
++      TP_fast_assign(
++              __entry->vdev = vdev;
++              __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
++              __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
++              __entry->fake_port = (u8) vdev->fake_port;
++              __entry->real_port = (u8) vdev->real_port;
++              __entry->current_mel = (u16) vdev->current_mel;
++              ),
++      TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
++              __entry->vdev, __entry->in_ctx, __entry->out_ctx,
++              __entry->fake_port, __entry->real_port, __entry->current_mel
++      )
++);
++
++DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
++      TP_PROTO(struct xhci_virt_device *vdev),
++      TP_ARGS(vdev)
++);
++
+ DECLARE_EVENT_CLASS(xhci_log_virt_dev,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev),
+@@ -195,11 +226,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
+       TP_ARGS(vdev)
+ );
+ 
+-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
+-      TP_PROTO(struct xhci_virt_device *vdev),
+-      TP_ARGS(vdev)
+-);
+-
+ DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index d0f00274d16c..142a83e5974c 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -98,6 +98,9 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
+       { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+       { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
++      { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
++      { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
++      { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
+       { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+       { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+@@ -115,6 +118,9 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+       { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+       { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++      { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
++      { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
++      { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+       { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+       { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+@@ -127,7 +133,9 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
+       { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+       { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
++      { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
++      { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
+       { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+       { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+       { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -137,17 +145,23 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
+       { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
++      { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
++      { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
+       { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
+       { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
++      { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+       { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+       { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+       { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
+       { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
++      { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
+       { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
++      { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
++      { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
+       { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
+       { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
+       { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index dd24c5c1534d..251f5d66651e 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -346,6 +346,19 @@ static void ucsi_connector_change(struct work_struct *work)
+       }
+ 
+       if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
++              typec_set_pwr_role(con->port, con->status.pwr_dir);
++
++              switch (con->status.partner_type) {
++              case UCSI_CONSTAT_PARTNER_TYPE_UFP:
++                      typec_set_data_role(con->port, TYPEC_HOST);
++                      break;
++              case UCSI_CONSTAT_PARTNER_TYPE_DFP:
++                      typec_set_data_role(con->port, TYPEC_DEVICE);
++                      break;
++              default:
++                      break;
++              }
++
+               if (con->status.connected)
+                       ucsi_register_partner(con);
+               else
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index cabd47612b0a..494d2a49203a 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -82,6 +82,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
++      /* This will make sure we can use ioremap_nocache() */
++      status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
++      if (ACPI_FAILURE(status))
++              return -ENOMEM;
++
+       /*
+        * NOTE: The memory region for the data structures is used also in an
+        * operation region, which means ACPI has already reserved it. Therefore
+diff --git a/fs/afs/security.c b/fs/afs/security.c
+index faca66227ecf..859096e25f2c 100644
+--- a/fs/afs/security.c
++++ b/fs/afs/security.c
+@@ -323,18 +323,14 @@ int afs_permission(struct inode *inode, int mask)
+              mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
+ 
+       if (S_ISDIR(inode->i_mode)) {
+-              if (mask & MAY_EXEC) {
++              if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
+                       if (!(access & AFS_ACE_LOOKUP))
+                               goto permission_denied;
+-              } else if (mask & MAY_READ) {
+-                      if (!(access & AFS_ACE_LOOKUP))
+-                              goto permission_denied;
+-              } else if (mask & MAY_WRITE) {
++              }
++              if (mask & MAY_WRITE) {
+                       if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
+                                       AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */
+                               goto permission_denied;
+-              } else {
+-                      BUG();
+               }
+       } else {
+               if (!(access & AFS_ACE_LOOKUP))
+diff --git a/fs/inode.c b/fs/inode.c
+index d1e35b53bb23..e07b3e1f5970 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -177,6 +177,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+       mapping->a_ops = &empty_aops;
+       mapping->host = inode;
+       mapping->flags = 0;
++      mapping->wb_err = 0;
+       atomic_set(&mapping->i_mmap_writable, 0);
+       mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
+       mapping->private_data = NULL;
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 502af53ec012..13c105121a18 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -441,6 +441,9 @@ int acpi_check_resource_conflict(const struct resource *res);
+ int acpi_check_region(resource_size_t start, resource_size_t n,
+                     const char *name);
+ 
++acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
++                              u32 level);
++
+ int acpi_resources_are_enforced(void);
+ 
+ #ifdef CONFIG_HIBERNATION
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 079c69cae2f6..59a4f50ffe8d 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -177,6 +177,7 @@ struct nft_data_desc {
+ int nft_data_init(const struct nft_ctx *ctx,
+                 struct nft_data *data, unsigned int size,
+                 struct nft_data_desc *desc, const struct nlattr *nla);
++void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
+ void nft_data_release(const struct nft_data *data, enum nft_data_types type);
+ int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+                 enum nft_data_types type, unsigned int len);
+@@ -731,6 +732,10 @@ struct nft_expr_ops {
+       int                             (*init)(const struct nft_ctx *ctx,
+                                               const struct nft_expr *expr,
+                                               const struct nlattr * const tb[]);
++      void                            (*activate)(const struct nft_ctx *ctx,
++                                                  const struct nft_expr *expr);
++      void                            (*deactivate)(const struct nft_ctx *ctx,
++                                                    const struct nft_expr *expr);
+       void                            (*destroy)(const struct nft_ctx *ctx,
+                                                  const struct nft_expr *expr);
+       int                             (*dump)(struct sk_buff *skb,
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f287dcbe8cb2..31615d1ae44c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -894,6 +894,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+ }
+ 
+ #ifdef CONFIG_SMP
++
++static inline bool is_per_cpu_kthread(struct task_struct *p)
++{
++      if (!(p->flags & PF_KTHREAD))
++              return false;
++
++      if (p->nr_cpus_allowed != 1)
++              return false;
++
++      return true;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++      if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++              return false;
++
++      if (is_per_cpu_kthread(p))
++              return cpu_online(cpu);
++
++      return cpu_active(cpu);
++}
++
+ /*
+  * This is how migration works:
+  *
+@@ -951,16 +978,8 @@ struct migration_arg {
+ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
+                                struct task_struct *p, int dest_cpu)
+ {
+-      if (p->flags & PF_KTHREAD) {
+-              if (unlikely(!cpu_online(dest_cpu)))
+-                      return rq;
+-      } else {
+-              if (unlikely(!cpu_active(dest_cpu)))
+-                      return rq;
+-      }
+-
+       /* Affinity changed (again). */
+-      if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++      if (!is_cpu_allowed(p, dest_cpu))
+               return rq;
+ 
+       update_rq_clock(rq);
+@@ -1489,10 +1508,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
+       for (;;) {
+               /* Any allowed, online CPU? */
+               for_each_cpu(dest_cpu, &p->cpus_allowed) {
+-                      if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+-                              continue;
+-                      if (!cpu_online(dest_cpu))
++                      if (!is_cpu_allowed(p, dest_cpu))
+                               continue;
++
+                       goto out;
+               }
+ 
+@@ -1555,8 +1573,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+        * [ this allows ->select_task() to simply return task_cpu(p) and
+        *   not worry about this generic constraint ]
+        */
+-      if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+-                   !cpu_online(cpu)))
++      if (unlikely(!is_cpu_allowed(p, cpu)))
+               cpu = select_fallback_rq(task_cpu(p), p);
+ 
+       return cpu;
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 5b8cd359c4c0..e27fb6e97d18 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1950,7 +1950,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+       int off, pad = 0;
+       unsigned int size_kern, match_size = mwt->match_size;
+ 
+-      strlcpy(name, mwt->u.name, sizeof(name));
++      if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
++              return -EINVAL;
+ 
+       if (state->buf_kern_start)
+               dst = state->buf_kern_start + state->buf_kern_offset;
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index b12e61b7b16c..1c4a5de3f301 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -48,10 +48,8 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+       }
+ 
+       fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+-      if ((flags & XT_RPFILTER_LOOSE) == 0) {
++      if ((flags & XT_RPFILTER_LOOSE) == 0)
+               fl6.flowi6_oif = dev->ifindex;
+-              lookup_flags |= RT6_LOOKUP_F_IFACE;
+-      }
+ 
+       rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
+       if (rt->dst.error)
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index 54b5899543ef..fd9a45cbd709 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -182,7 +182,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+       }
+ 
+       *dest = 0;
+- again:
+       rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
+       if (rt->dst.error)
+               goto put_rt_err;
+@@ -191,15 +190,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+       if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
+               goto put_rt_err;
+ 
+-      if (oif && oif != rt->rt6i_idev->dev) {
+-              /* multipath route? Try again with F_IFACE */
+-              if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
+-                      lookup_flags |= RT6_LOOKUP_F_IFACE;
+-                      fl6.flowi6_oif = oif->ifindex;
+-                      ip6_rt_put(rt);
+-                      goto again;
+-              }
+-      }
++      if (oif && oif != rt->rt6i_idev->dev)
++              goto put_rt_err;
+ 
+       switch (priv->result) {
+       case NFT_FIB_RESULT_OIF:
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 17e95a0386b3..d6b012295b45 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -123,7 +123,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+       struct flowi6 *fl6 = &fl->u.ip6;
+       int onlyproto = 0;
+       const struct ipv6hdr *hdr = ipv6_hdr(skb);
+-      u16 offset = sizeof(*hdr);
++      u32 offset = sizeof(*hdr);
+       struct ipv6_opt_hdr *exthdr;
+       const unsigned char *nh = skb_network_header(skb);
+       u16 nhoff = IP6CB(skb)->nhoff;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index e8f1556fa446..327ebe786eeb 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2384,8 +2384,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+                       struct ipvs_sync_daemon_cfg cfg;
+ 
+                       memset(&cfg, 0, sizeof(cfg));
+-                      strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+-                              sizeof(cfg.mcast_ifn));
++                      ret = -EINVAL;
++                      if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
++                                  sizeof(cfg.mcast_ifn)) <= 0)
++                              goto out_dec;
+                       cfg.syncid = dm->syncid;
+                       ret = start_sync_thread(ipvs, &cfg, dm->state);
+               } else {
+@@ -2423,12 +2425,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+               }
+       }
+ 
++      if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
++          strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
++          IP_VS_SCHEDNAME_MAXLEN) {
++              ret = -EINVAL;
++              goto out_unlock;
++      }
++
+       /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
+       if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
+           usvc.protocol != IPPROTO_SCTP) {
+-              pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
++              pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
+                      usvc.protocol, &usvc.addr.ip,
+-                     ntohs(usvc.port), usvc.sched_name);
++                     ntohs(usvc.port));
+               ret = -EFAULT;
+               goto out_unlock;
+       }
+@@ -2850,7 +2859,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
+ static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
+       [IPVS_DAEMON_ATTR_STATE]        = { .type = NLA_U32 },
+       [IPVS_DAEMON_ATTR_MCAST_IFN]    = { .type = NLA_NUL_STRING,
+-                                          .len = IP_VS_IFNAME_MAXLEN },
++                                          .len = IP_VS_IFNAME_MAXLEN - 1 },
+       [IPVS_DAEMON_ATTR_SYNC_ID]      = { .type = NLA_U32 },
+       [IPVS_DAEMON_ATTR_SYNC_MAXLEN]  = { .type = NLA_U16 },
+       [IPVS_DAEMON_ATTR_MCAST_GROUP]  = { .type = NLA_U32 },
+@@ -2868,7 +2877,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
+       [IPVS_SVC_ATTR_PORT]            = { .type = NLA_U16 },
+       [IPVS_SVC_ATTR_FWMARK]          = { .type = NLA_U32 },
+       [IPVS_SVC_ATTR_SCHED_NAME]      = { .type = NLA_NUL_STRING,
+-                                          .len = IP_VS_SCHEDNAME_MAXLEN },
++                                          .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
+       [IPVS_SVC_ATTR_PE_NAME]         = { .type = NLA_NUL_STRING,
+                                           .len = IP_VS_PENAME_MAXLEN },
+       [IPVS_SVC_ATTR_FLAGS]           = { .type = NLA_BINARY,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index cf30c440f7a7..85b549e84104 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -220,6 +220,34 @@ static int nft_delchain(struct nft_ctx *ctx)
+       return err;
+ }
+ 
++static void nft_rule_expr_activate(const struct nft_ctx *ctx,
++                                 struct nft_rule *rule)
++{
++      struct nft_expr *expr;
++
++      expr = nft_expr_first(rule);
++      while (expr != nft_expr_last(rule) && expr->ops) {
++              if (expr->ops->activate)
++                      expr->ops->activate(ctx, expr);
++
++              expr = nft_expr_next(expr);
++      }
++}
++
++static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
++                                   struct nft_rule *rule)
++{
++      struct nft_expr *expr;
++
++      expr = nft_expr_first(rule);
++      while (expr != nft_expr_last(rule) && expr->ops) {
++              if (expr->ops->deactivate)
++                      expr->ops->deactivate(ctx, expr);
++
++              expr = nft_expr_next(expr);
++      }
++}
++
+ static int
+ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
+ {
+@@ -265,6 +293,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
+               nft_trans_destroy(trans);
+               return err;
+       }
++      nft_rule_expr_deactivate(ctx, rule);
+ 
+       return 0;
+ }
+@@ -1237,8 +1266,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
+               rcu_assign_pointer(chain->stats, newstats);
+               synchronize_rcu();
+               free_percpu(oldstats);
+-      } else
++      } else {
+               rcu_assign_pointer(chain->stats, newstats);
++              static_branch_inc(&nft_counters_enabled);
++      }
+ }
+ 
+ static void nf_tables_chain_destroy(struct nft_chain *chain)
+@@ -1947,6 +1978,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
+       [NFTA_RULE_POSITION]    = { .type = NLA_U64 },
+       [NFTA_RULE_USERDATA]    = { .type = NLA_BINARY,
+                                   .len = NFT_USERDATA_MAXLEN },
++      [NFTA_RULE_ID]          = { .type = NLA_U32 },
+ };
+ 
+ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+@@ -2218,6 +2250,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+       kfree(rule);
+ }
+ 
++static void nf_tables_rule_release(const struct nft_ctx *ctx,
++                                 struct nft_rule *rule)
++{
++      nft_rule_expr_deactivate(ctx, rule);
++      nf_tables_rule_destroy(ctx, rule);
++}
++
+ #define NFT_RULE_MAXEXPRS     128
+ 
+ static struct nft_expr_info *info;
+@@ -2385,7 +2424,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+       return 0;
+ 
+ err2:
+-      nf_tables_rule_destroy(&ctx, rule);
++      nf_tables_rule_release(&ctx, rule);
+ err1:
+       for (i = 0; i < n; i++) {
+               if (info[i].ops != NULL)
+@@ -3374,6 +3413,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
+       [NFTA_SET_ELEM_TIMEOUT]         = { .type = NLA_U64 },
+       [NFTA_SET_ELEM_USERDATA]        = { .type = NLA_BINARY,
+                                           .len = NFT_USERDATA_MAXLEN },
++      [NFTA_SET_ELEM_EXPR]            = { .type = NLA_NESTED },
++      [NFTA_SET_ELEM_OBJREF]          = { .type = NLA_STRING },
+ };
+ 
+ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
+@@ -3961,8 +4002,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+                       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
+                           nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
+                           nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
+-                          nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
+-                              return -EBUSY;
++                          nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
++                              err = -EBUSY;
++                              goto err5;
++                      }
+                       if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+                            nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
+                            memcmp(nft_set_ext_data(ext),
+@@ -4054,7 +4097,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
+  *    NFT_GOTO verdicts. This function must be called on active data objects
+  *    from the second phase of the commit protocol.
+  */
+-static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
++void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ {
+       if (type == NFT_DATA_VERDICT) {
+               switch (data->verdict.code) {
+@@ -4571,7 +4614,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+                               if (idx > s_idx)
+                                       memset(&cb->args[1], 0,
+                                              sizeof(cb->args) - sizeof(cb->args[0]));
+-                              if (filter && filter->table[0] &&
++                              if (filter && filter->table &&
+                                   strcmp(filter->table, table->name))
+                                       goto cont;
+                               if (filter &&
+@@ -5221,10 +5264,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+               case NFT_MSG_NEWRULE:
+                       trans->ctx.chain->use--;
+                       list_del_rcu(&nft_trans_rule(trans)->list);
++                      nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+                       break;
+               case NFT_MSG_DELRULE:
+                       trans->ctx.chain->use++;
+                       nft_clear(trans->ctx.net, nft_trans_rule(trans));
++                      nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWSET:
+@@ -5798,7 +5843,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
+       list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
+               list_del(&rule->list);
+               ctx->chain->use--;
+-              nf_tables_rule_destroy(ctx, rule);
++              nf_tables_rule_release(ctx, rule);
+       }
+       list_del(&ctx->chain->list);
+       ctx->table->use--;
+@@ -5832,7 +5877,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
+                       list_for_each_entry_safe(rule, nr, &chain->rules, list) {
+                               list_del(&rule->list);
+                               chain->use--;
+-                              nf_tables_rule_destroy(&ctx, rule);
++                              nf_tables_rule_release(&ctx, rule);
+                       }
+               }
+               list_for_each_entry_safe(set, ns, &table->sets, list) {
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
+index dfd0bf3810d2..32b7896929f3 100644
+--- a/net/netfilter/nf_tables_core.c
++++ b/net/netfilter/nf_tables_core.c
+@@ -119,14 +119,21 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
+ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
+                                           const struct nft_pktinfo *pkt)
+ {
++      struct nft_base_chain *base_chain;
+       struct nft_stats *stats;
+ 
++      base_chain = nft_base_chain(chain);
++      if (!base_chain->stats)
++              return;
++
+       local_bh_disable();
+-      stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats));
+-      u64_stats_update_begin(&stats->syncp);
+-      stats->pkts++;
+-      stats->bytes += pkt->skb->len;
+-      u64_stats_update_end(&stats->syncp);
++      stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
++      if (stats) {
++              u64_stats_update_begin(&stats->syncp);
++              stats->pkts++;
++              stats->bytes += pkt->skb->len;
++              u64_stats_update_end(&stats->syncp);
++      }
+       local_bh_enable();
+ }
+ 
+@@ -201,7 +208,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
+ 
+       switch (regs.verdict.code) {
+       case NFT_JUMP:
+-              BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
++              if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
++                      return NF_DROP;
+               jumpstack[stackptr].chain = chain;
+               jumpstack[stackptr].rule  = rule;
+               jumpstack[stackptr].rulenum = rulenum;
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index b89f4f65b2a0..3bd637eadc42 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -27,14 +27,31 @@ struct nft_xt {
+       struct list_head        head;
+       struct nft_expr_ops     ops;
+       unsigned int            refcnt;
++
++      /* Unlike other expressions, ops doesn't have static storage duration.
++       * nft core assumes they do.  We use kfree_rcu so that nft core
++       * can check expr->ops->size even after nft_compat->destroy() frees
++       * the nft_xt struct that holds the ops structure.
++       */
++      struct rcu_head         rcu_head;
++};
++
++/* Used for matches where *info is larger than X byte */
++#define NFT_MATCH_LARGE_THRESH        192
++
++struct nft_xt_match_priv {
++      void *info;
+ };
+ 
+-static void nft_xt_put(struct nft_xt *xt)
++static bool nft_xt_put(struct nft_xt *xt)
+ {
+       if (--xt->refcnt == 0) {
+               list_del(&xt->head);
+-              kfree(xt);
++              kfree_rcu(xt, rcu_head);
++              return true;
+       }
++
++      return false;
+ }
+ 
+ static int nft_compat_chain_validate_dependency(const char *tablename,
+@@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+       struct xt_target *target = expr->ops->data;
+       struct xt_tgchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
++      struct nft_xt *nft_xt;
+       u16 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+@@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+       if (ctx->nla[NFTA_RULE_COMPAT]) {
+               ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+               if (ret < 0)
+-                      goto err;
++                      return ret;
+       }
+ 
+       nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+ 
+       ret = xt_check_target(&par, size, proto, inv);
+       if (ret < 0)
+-              goto err;
++              return ret;
+ 
+       /* The standard target cannot be used */
+-      if (target->target == NULL) {
+-              ret = -EINVAL;
+-              goto err;
+-      }
++      if (!target->target)
++              return -EINVAL;
+ 
++      nft_xt = container_of(expr->ops, struct nft_xt, ops);
++      nft_xt->refcnt++;
+       return 0;
+-err:
+-      module_put(target->me);
+-      return ret;
+ }
+ 
+ static void
+@@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+       if (par.target->destroy != NULL)
+               par.target->destroy(&par);
+ 
+-      nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
+-      module_put(target->me);
++      if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
++              module_put(target->me);
+ }
+ 
+ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+@@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+       return 0;
+ }
+ 
+-static void nft_match_eval(const struct nft_expr *expr,
+-                         struct nft_regs *regs,
+-                         const struct nft_pktinfo *pkt)
++static void __nft_match_eval(const struct nft_expr *expr,
++                           struct nft_regs *regs,
++                           const struct nft_pktinfo *pkt,
++                           void *info)
+ {
+-      void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       bool ret;
+@@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr,
+       }
+ }
+ 
++static void nft_match_large_eval(const struct nft_expr *expr,
++                               struct nft_regs *regs,
++                               const struct nft_pktinfo *pkt)
++{
++      struct nft_xt_match_priv *priv = nft_expr_priv(expr);
++
++      __nft_match_eval(expr, regs, pkt, priv->info);
++}
++
++static void nft_match_eval(const struct nft_expr *expr,
++                         struct nft_regs *regs,
++                         const struct nft_pktinfo *pkt)
++{
++      __nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
++}
++
+ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+       [NFTA_MATCH_NAME]       = { .type = NLA_NUL_STRING },
+       [NFTA_MATCH_REV]        = { .type = NLA_U32 },
+@@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out)
+ }
+ 
+ static int
+-nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+-              const struct nlattr * const tb[])
++__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
++               const struct nlattr * const tb[],
++               void *info)
+ {
+-      void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct xt_mtchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
++      struct nft_xt *nft_xt;
+       u16 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+@@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+       if (ctx->nla[NFTA_RULE_COMPAT]) {
+               ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
+               if (ret < 0)
+-                      goto err;
++                      return ret;
+       }
+ 
+       nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+ 
+       ret = xt_check_match(&par, size, proto, inv);
+       if (ret < 0)
+-              goto err;
++              return ret;
+ 
++      nft_xt = container_of(expr->ops, struct nft_xt, ops);
++      nft_xt->refcnt++;
+       return 0;
+-err:
+-      module_put(match->me);
++}
++
++static int
++nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
++             const struct nlattr * const tb[])
++{
++      return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
++}
++
++static int
++nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
++                   const struct nlattr * const tb[])
++{
++      struct nft_xt_match_priv *priv = nft_expr_priv(expr);
++      struct xt_match *m = expr->ops->data;
++      int ret;
++
++      priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
++      if (!priv->info)
++              return -ENOMEM;
++
++      ret = __nft_match_init(ctx, expr, tb, priv->info);
++      if (ret)
++              kfree(priv->info);
+       return ret;
+ }
+ 
+ static void
+-nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
++__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
++                  void *info)
+ {
+       struct xt_match *match = expr->ops->data;
+-      void *info = nft_expr_priv(expr);
+       struct xt_mtdtor_param par;
+ 
+       par.net = ctx->net;
+@@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+       if (par.match->destroy != NULL)
+               par.match->destroy(&par);
+ 
+-      nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
+-      module_put(match->me);
++      if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
++              module_put(match->me);
+ }
+ 
+-static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
++static void
++nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
++{
++      __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
++}
++
++static void
++nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
++{
++      struct nft_xt_match_priv *priv = nft_expr_priv(expr);
++
++      __nft_match_destroy(ctx, expr, priv->info);
++      kfree(priv->info);
++}
++
++static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
++                          void *info)
+ {
+-      void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+ 
+       if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+@@ -470,6 +541,18 @@ static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
+       return -1;
+ }
+ 
++static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
++{
++      return __nft_match_dump(skb, expr, nft_expr_priv(expr));
++}
++
++static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
++{
++      struct nft_xt_match_priv *priv = nft_expr_priv(e);
++
++      return __nft_match_dump(skb, e, priv->info);
++}
++
+ static int nft_match_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+@@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ {
+       struct nft_xt *nft_match;
+       struct xt_match *match;
++      unsigned int matchsize;
+       char *mt_name;
+       u32 rev, family;
+       int err;
+@@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+       list_for_each_entry(nft_match, &nft_match_list, head) {
+               struct xt_match *match = nft_match->ops.data;
+ 
+-              if (nft_match_cmp(match, mt_name, rev, family)) {
+-                      if (!try_module_get(match->me))
+-                              return ERR_PTR(-ENOENT);
+-
+-                      nft_match->refcnt++;
++              if (nft_match_cmp(match, mt_name, rev, family))
+                       return &nft_match->ops;
+-              }
+       }
+ 
+       match = xt_request_find_match(family, mt_name, rev);
+@@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+               goto err;
+       }
+ 
+-      nft_match->refcnt = 1;
++      nft_match->refcnt = 0;
+       nft_match->ops.type = &nft_match_type;
+-      nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
+       nft_match->ops.eval = nft_match_eval;
+       nft_match->ops.init = nft_match_init;
+       nft_match->ops.destroy = nft_match_destroy;
+@@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+       nft_match->ops.validate = nft_match_validate;
+       nft_match->ops.data = match;
+ 
++      matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
++      if (matchsize > NFT_MATCH_LARGE_THRESH) {
++              matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
++
++              nft_match->ops.eval = nft_match_large_eval;
++              nft_match->ops.init = nft_match_large_init;
++              nft_match->ops.destroy = nft_match_large_destroy;
++              nft_match->ops.dump = nft_match_large_dump;
++      }
++
++      nft_match->ops.size = matchsize;
++
+       list_add(&nft_match->head, &nft_match_list);
+ 
+       return &nft_match->ops;
+@@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+       list_for_each_entry(nft_target, &nft_target_list, head) {
+               struct xt_target *target = nft_target->ops.data;
+ 
+-              if (nft_target_cmp(target, tg_name, rev, family)) {
+-                      if (!try_module_get(target->me))
+-                              return ERR_PTR(-ENOENT);
+-
+-                      nft_target->refcnt++;
++              if (nft_target_cmp(target, tg_name, rev, family))
+                       return &nft_target->ops;
+-              }
+       }
+ 
+       target = xt_request_find_target(family, tg_name, rev);
+@@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+               goto err;
+       }
+ 
+-      nft_target->refcnt = 1;
++      nft_target->refcnt = 0;
+       nft_target->ops.type = &nft_target_type;
+       nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
+       nft_target->ops.init = nft_target_init;
+@@ -825,6 +910,32 @@ static int __init nft_compat_module_init(void)
+ 
+ static void __exit nft_compat_module_exit(void)
+ {
++      struct nft_xt *xt, *next;
++
++      /* list should be empty here, it can be non-empty only in case there
++       * was an error that caused nft_xt expr to not be initialized fully
++       * and no one else requested the same expression later.
++       *
++       * In this case, the lists contain 0-refcount entries that still
++       * hold module reference.
++       */
++      list_for_each_entry_safe(xt, next, &nft_target_list, head) {
++              struct xt_target *target = xt->ops.data;
++
++              if (WARN_ON_ONCE(xt->refcnt))
++                      continue;
++              module_put(target->me);
++              kfree(xt);
++      }
++
++      list_for_each_entry_safe(xt, next, &nft_match_list, head) {
++              struct xt_match *match = xt->ops.data;
++
++              if (WARN_ON_ONCE(xt->refcnt))
++                      continue;
++              module_put(match->me);
++              kfree(xt);
++      }
+       nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+       nft_unregister_expr(&nft_target_type);
+       nft_unregister_expr(&nft_match_type);
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 4717d7796927..aa87ff8beae8 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -69,8 +69,16 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+       return err;
+ }
+ 
+-static void nft_immediate_destroy(const struct nft_ctx *ctx,
+-                                const struct nft_expr *expr)
++static void nft_immediate_activate(const struct nft_ctx *ctx,
++                                 const struct nft_expr *expr)
++{
++      const struct nft_immediate_expr *priv = nft_expr_priv(expr);
++
++      return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
++}
++
++static void nft_immediate_deactivate(const struct nft_ctx *ctx,
++                                   const struct nft_expr *expr)
+ {
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ 
+@@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = {
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
+       .eval           = nft_immediate_eval,
+       .init           = nft_immediate_init,
+-      .destroy        = nft_immediate_destroy,
++      .activate       = nft_immediate_activate,
++      .deactivate     = nft_immediate_deactivate,
+       .dump           = nft_immediate_dump,
+       .validate       = nft_immediate_validate,
+ };
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index a9fc298ef4c3..72f13a1144dd 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
+       return !limit->invert;
+ }
+ 
++/* Use same default as in iptables. */
++#define NFT_LIMIT_PKT_BURST_DEFAULT   5
++
+ static int nft_limit_init(struct nft_limit *limit,
+-                        const struct nlattr * const tb[])
++                        const struct nlattr * const tb[], bool pkts)
+ {
+-      u64 unit;
++      u64 unit, tokens;
+ 
+       if (tb[NFTA_LIMIT_RATE] == NULL ||
+           tb[NFTA_LIMIT_UNIT] == NULL)
+@@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
+ 
+       if (tb[NFTA_LIMIT_BURST])
+               limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+-      else
+-              limit->burst = 0;
++
++      if (pkts && limit->burst == 0)
++              limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+ 
+       if (limit->rate + limit->burst < limit->rate)
+               return -EOVERFLOW;
+ 
+-      /* The token bucket size limits the number of tokens can be
+-       * accumulated. tokens_max specifies the bucket size.
+-       * tokens_max = unit * (rate + burst) / rate.
+-       */
+-      limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+-                              limit->rate);
++      if (pkts) {
++              tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
++      } else {
++              /* The token bucket size limits the number of tokens can be
++               * accumulated. tokens_max specifies the bucket size.
++               * tokens_max = unit * (rate + burst) / rate.
++               */
++              tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
++                               limit->rate);
++      }
++
++      limit->tokens = tokens;
+       limit->tokens_max = limit->tokens;
+ 
+       if (tb[NFTA_LIMIT_FLAGS]) {
+@@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+       struct nft_limit_pkts *priv = nft_expr_priv(expr);
+       int err;
+ 
+-      err = nft_limit_init(&priv->limit, tb);
++      err = nft_limit_init(&priv->limit, tb, true);
+       if (err < 0)
+               return err;
+ 
+@@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
+ {
+       struct nft_limit *priv = nft_expr_priv(expr);
+ 
+-      return nft_limit_init(priv, tb);
++      return nft_limit_init(priv, tb, false);
+ }
+ 
+ static int nft_limit_bytes_dump(struct sk_buff *skb,
+@@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
+       struct nft_limit_pkts *priv = nft_obj_data(obj);
+       int err;
+ 
+-      err = nft_limit_init(&priv->limit, tb);
++      err = nft_limit_init(&priv->limit, tb, true);
+       if (err < 0)
+               return err;
+ 
+@@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
+ {
+       struct nft_limit *priv = nft_obj_data(obj);
+ 
+-      return nft_limit_init(priv, tb);
++      return nft_limit_init(priv, tb, false);
+ }
+ 
+ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 5a60eb23a7ed..c71184d4eac1 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -229,7 +229,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
+       struct sk_buff *skb = pkt->skb;
+       u32 *sreg = &regs->data[meta->sreg];
+       u32 value = *sreg;
+-      u8 pkt_type;
++      u8 value8;
+ 
+       switch (meta->key) {
+       case NFT_META_MARK:
+@@ -239,15 +239,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
+               skb->priority = value;
+               break;
+       case NFT_META_PKTTYPE:
+-              pkt_type = nft_reg_load8(sreg);
++              value8 = nft_reg_load8(sreg);
+ 
+-              if (skb->pkt_type != pkt_type &&
+-                  skb_pkt_type_ok(pkt_type) &&
++              if (skb->pkt_type != value8 &&
++                  skb_pkt_type_ok(value8) &&
+                   skb_pkt_type_ok(skb->pkt_type))
+-                      skb->pkt_type = pkt_type;
++                      skb->pkt_type = value8;
+               break;
+       case NFT_META_NFTRACE:
+-              skb->nf_trace = !!value;
++              value8 = nft_reg_load8(sreg);
++
++              skb->nf_trace = !!value8;
+               break;
+       default:
+               WARN_ON(1);
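In the nft_meta hunk above, NFT_META_NFTRACE now reads its source register with nft_reg_load8() instead of using the full 32-bit value, matching how NFT_META_PKTTYPE already loads byte-sized data. A small self-contained sketch of why the load width matters when only one byte was actually written; the register layout below is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* a 32-bit register where userspace stored a single 0 byte,
         * with stale data left behind it */
        uint8_t reg[4] = { 0x00, 0xcd, 0xab, 0x00 };
        uint32_t full;

        memcpy(&full, reg, sizeof(full));

        printf("!!full 32-bit value: %d\n", !!full);   /* 1 - stale bytes leak in */
        printf("!!first byte only:   %d\n", !!reg[0]); /* 0 - what was actually stored */
        return 0;
}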
+diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
+index a59db7c45a65..81ede20f49d7 100644
+--- a/tools/perf/tests/topology.c
++++ b/tools/perf/tests/topology.c
+@@ -66,6 +66,27 @@ static int check_cpu_topology(char *path, struct cpu_map *map)
+       session = perf_session__new(&file, false, NULL);
+       TEST_ASSERT_VAL("can't get session", session);
+ 
++      /* On platforms with large numbers of CPUs process_cpu_topology()
++       * might issue an error while reading the perf.data file section
++       * HEADER_CPU_TOPOLOGY and the cpu_topology_map pointed to by member
++       * cpu is a NULL pointer.
++       * Example: On s390
++       *   CPU 0 is on core_id 0 and physical_package_id 6
++       *   CPU 1 is on core_id 1 and physical_package_id 3
++       *
++       *   Core_id and physical_package_id are platform and architecture
++       *   dependend and might have higher numbers than the CPU id.
++       *   This actually depends on the configuration.
++       *
++       *  In this case process_cpu_topology() prints error message:
++       *  "socket_id number is too big. You may need to upgrade the
++       *  perf tool."
++       *
++       *  This is the reason why this test might be skipped.
++       */
++      if (!session->header.env.cpu)
++              return TEST_SKIP;
++
+       for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
+               if (!cpu_map__has(map, i))
+                       continue;
+@@ -91,7 +112,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
+ {
+       char path[PATH_MAX];
+       struct cpu_map *map;
+-      int ret = -1;
++      int ret = TEST_FAIL;
+ 
+       TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
+ 
+@@ -106,12 +127,9 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
+               goto free_path;
+       }
+ 
+-      if (check_cpu_topology(path, map))
+-              goto free_map;
+-      ret = 0;
+-
+-free_map:
++      ret = check_cpu_topology(path, map);
+       cpu_map__put(map);
++
+ free_path:
+       unlink(path);
+       return ret;
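The topology test change above returns check_cpu_topology()'s result directly, so the new TEST_SKIP value (taken when session->header.env.cpu is NULL) reaches the perf test harness instead of being collapsed to pass/fail. A rough sketch of that flow, with a hypothetical stub and stand-in enum values that only mirror perf's convention:

#include <stdio.h>

enum { TEST_OK = 0, TEST_FAIL = -1, TEST_SKIP = -2 };

/* hypothetical stand-in for check_cpu_topology(), for illustration only */
static int check_cpu_topology_stub(int have_cpu_map)
{
        if (!have_cpu_map)
                return TEST_SKIP;  /* e.g. cpu_topology_map could not be read */
        return TEST_OK;
}

int main(void)
{
        int ret = check_cpu_topology_stub(0);  /* passed through, not collapsed to -1/0 */

        if (ret == TEST_SKIP)
                printf("topology test skipped\n");
        else
                printf("topology test %s\n", ret == TEST_OK ? "passed" : "failed");
        return ret == TEST_FAIL ? 1 : 0;
}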
+diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
+index 72c107fcbc5a..c02d2cfd3aea 100644
+--- a/tools/perf/util/bpf-loader.c
++++ b/tools/perf/util/bpf-loader.c
+@@ -66,7 +66,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
+       }
+ 
+       obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
+-      if (IS_ERR(obj)) {
++      if (IS_ERR_OR_NULL(obj)) {
+               pr_debug("bpf: failed to load buffer\n");
+               return ERR_PTR(-EINVAL);
+       }
+@@ -102,14 +102,14 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
+                       pr_debug("bpf: successfull builtin compilation\n");
+               obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+ 
+-              if (!IS_ERR(obj) && llvm_param.dump_obj)
++              if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
+                       llvm__dump_obj(filename, obj_buf, obj_buf_sz);
+ 
+               free(obj_buf);
+       } else
+               obj = bpf_object__open(filename);
+ 
+-      if (IS_ERR(obj)) {
++      if (IS_ERR_OR_NULL(obj)) {
+               pr_debug("bpf: failed to load %s\n", filename);
+               return obj;
+       }
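The bpf-loader hunks replace IS_ERR() with IS_ERR_OR_NULL() because the object-open helpers can report failure either as an ERR_PTR-encoded errno or as a plain NULL, and a NULL return would sail through an IS_ERR()-only check. A minimal userspace sketch of the distinction; the pointer encoding below only mimics the kernel's ERR_PTR convention for illustration:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* userspace re-creations of the kernel helpers, illustrative only */
static int is_err(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int is_err_or_null(const void *ptr)
{
        return !ptr || is_err(ptr);
}

int main(void)
{
        void *err_ptr  = (void *)(uintptr_t)-22;  /* like ERR_PTR(-EINVAL) */
        void *null_ptr = NULL;                    /* the other failure mode */

        printf("IS_ERR:         err=%d null=%d\n", is_err(err_ptr), is_err(null_ptr));
        printf("IS_ERR_OR_NULL: err=%d null=%d\n",
               is_err_or_null(err_ptr), is_err_or_null(null_ptr));
        return 0;
}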
