commit:     87583f9e386eb687f2ebb5dcd54ebeee1a152485
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 13 22:04:52 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 13 22:04:52 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=87583f9e

proj/linux-patches: Linux patch 4.9.163

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1162_linux-4.9.163.patch | 3813 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3817 insertions(+)

diff --git a/0000_README b/0000_README
index 44fb51a..6d74c9a 100644
--- a/0000_README
+++ b/0000_README
@@ -691,6 +691,10 @@ Patch:  1161_linux-4.9.162.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.162
 
+Patch:  1162_linux-4.9.163.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.163
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1162_linux-4.9.163.patch b/1162_linux-4.9.163.patch
new file mode 100644
index 0000000..f992152
--- /dev/null
+++ b/1162_linux-4.9.163.patch
@@ -0,0 +1,3813 @@
+diff --git a/Makefile b/Makefile
+index fce163d09139..8a5330e279ad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 162
++SUBLEVEL = 163
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
+index 2a531beef4c7..51dbd8cb91cb 100644
+--- a/arch/arm/boot/dts/exynos3250.dtsi
++++ b/arch/arm/boot/dts/exynos3250.dtsi
+@@ -170,6 +170,9 @@
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       interrupt-parent = <&gic>;
++                      clock-names = "clkout8";
++                      clocks = <&cmu CLK_FIN_PLL>;
++                      #clock-cells = <1>;
+               };
+ 
+               mipi_phy: video-phy {
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index 5282d69e55bd..2d83cbf672b2 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -70,7 +70,7 @@
+       };
+ 
+       emmc_pwrseq: pwrseq {
+-              pinctrl-0 = <&sd1_cd>;
++              pinctrl-0 = <&emmc_rstn>;
+               pinctrl-names = "default";
+               compatible = "mmc-pwrseq-emmc";
+               reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
+@@ -161,12 +161,6 @@
+       cpu0-supply = <&buck2_reg>;
+ };
+ 
+-/* RSTN signal for eMMC */
+-&sd1_cd {
+-      samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+-      samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+-};
+-
+ &pinctrl_1 {
+       gpio_power_key: power_key {
+               samsung,pins = "gpx1-3";
+@@ -184,6 +178,11 @@
+               samsung,pins = "gpx3-7";
+               samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+       };
++
++      emmc_rstn: emmc-rstn {
++              samsung,pins = "gpk1-2";
++              samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
++      };
+ };
+ 
+ &ehci {
+diff --git a/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
+new file mode 100644
+index 000000000000..c8771c660550
+--- /dev/null
++++ b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
+@@ -0,0 +1,25 @@
++/*
++ * Device tree sources for Exynos5420 TMU sensor configuration
++ *
++ * Copyright (c) 2014 Lukasz Majewski <l.majew...@samsung.com>
++ * Copyright (c) 2017 Krzysztof Kozlowski <k...@kernel.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <dt-bindings/thermal/thermal_exynos.h>
++
++#thermal-sensor-cells = <0>;
++samsung,tmu_gain = <8>;
++samsung,tmu_reference_voltage = <16>;
++samsung,tmu_noise_cancel_mode = <4>;
++samsung,tmu_efuse_value = <55>;
++samsung,tmu_min_efuse_value = <0>;
++samsung,tmu_max_efuse_value = <100>;
++samsung,tmu_first_point_trim = <25>;
++samsung,tmu_second_point_trim = <85>;
++samsung,tmu_default_temp_offset = <50>;
++samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
+diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
+index 00c4cfa54839..52f3d911f67f 100644
+--- a/arch/arm/boot/dts/exynos5420.dtsi
++++ b/arch/arm/boot/dts/exynos5420.dtsi
+@@ -694,7 +694,7 @@
+                       interrupts = <0 65 0>;
+                       clocks = <&clock CLK_TMU>;
+                       clock-names = "tmu_apbif";
+-                      #include "exynos4412-tmu-sensor-conf.dtsi"
++                      #include "exynos5420-tmu-sensor-conf.dtsi"
+               };
+ 
+               tmu_cpu1: tmu@10064000 {
+@@ -703,7 +703,7 @@
+                       interrupts = <0 183 0>;
+                       clocks = <&clock CLK_TMU>;
+                       clock-names = "tmu_apbif";
+-                      #include "exynos4412-tmu-sensor-conf.dtsi"
++                      #include "exynos5420-tmu-sensor-conf.dtsi"
+               };
+ 
+               tmu_cpu2: tmu@10068000 {
+@@ -712,7 +712,7 @@
+                       interrupts = <0 184 0>;
+                       clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
+                       clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+-                      #include "exynos4412-tmu-sensor-conf.dtsi"
++                      #include "exynos5420-tmu-sensor-conf.dtsi"
+               };
+ 
+               tmu_cpu3: tmu@1006c000 {
+@@ -721,7 +721,7 @@
+                       interrupts = <0 185 0>;
+                       clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
+                       clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+-                      #include "exynos4412-tmu-sensor-conf.dtsi"
++                      #include "exynos5420-tmu-sensor-conf.dtsi"
+               };
+ 
+               tmu_gpu: tmu@100a0000 {
+@@ -730,7 +730,7 @@
+                       interrupts = <0 215 0>;
+                       clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
+                       clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+-                      #include "exynos4412-tmu-sensor-conf.dtsi"
++                      #include "exynos5420-tmu-sensor-conf.dtsi"
+               };
+ 
+               sysmmu_g2dr: sysmmu@0x10A60000 {
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 56be67ecf0fa..d69adfb3d79e 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -32,6 +32,7 @@
+  * features make this path too inefficient.
+  */
+ ret_fast_syscall:
++__ret_fast_syscall:
+  UNWIND(.fnstart      )
+  UNWIND(.cantunwind   )
+       disable_irq_notrace                     @ disable interrupts
+@@ -57,6 +58,7 @@ fast_work_pending:
+  * r0 first to avoid needing to save registers around each C function call.
+  */
+ ret_fast_syscall:
++__ret_fast_syscall:
+  UNWIND(.fnstart      )
+  UNWIND(.cantunwind   )
+       str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
+@@ -223,7 +225,7 @@ local_restart:
+       tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
+       bne     __sys_trace
+ 
+-      invoke_syscall tbl, scno, r10, ret_fast_syscall
++      invoke_syscall tbl, scno, r10, __ret_fast_syscall
+ 
+       add     r1, sp, #S_OFF
+ 2:    cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
+index ba13f793fbce..b92673efffff 100644
+--- a/arch/arm/plat-pxa/ssp.c
++++ b/arch/arm/plat-pxa/ssp.c
+@@ -237,8 +237,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+       if (ssp == NULL)
+               return -ENODEV;
+ 
+-      iounmap(ssp->mmio_base);
+-
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+ 
+@@ -248,7 +246,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+       list_del(&ssp->node);
+       mutex_unlock(&ssp_lock);
+ 
+-      kfree(ssp);
+       return 0;
+ }
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 2c93de7fffe5..bdea2d6fde94 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -219,7 +219,7 @@
+               compatible = "simple-bus";
+ 
+               intc: interrupt-controller@9bc0000 {
+-                      compatible = "arm,gic-v3";
++                      compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
+                       #interrupt-cells = <3>;
+                       interrupt-controller;
+                       #redistributor-regions = <1>;
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 30bcae0aef2a..d2b1b624ddc3 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -546,13 +546,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
+           addr < (unsigned long)__entry_text_end) ||
+           (addr >= (unsigned long)__idmap_text_start &&
+           addr < (unsigned long)__idmap_text_end) ||
++          (addr >= (unsigned long)__hyp_text_start &&
++          addr < (unsigned long)__hyp_text_end) ||
+           !!search_exception_tables(addr))
+               return true;
+ 
+       if (!is_kernel_in_hyp_mode()) {
+-              if ((addr >= (unsigned long)__hyp_text_start &&
+-                  addr < (unsigned long)__hyp_text_end) ||
+-                  (addr >= (unsigned long)__hyp_idmap_text_start &&
++              if ((addr >= (unsigned long)__hyp_idmap_text_start &&
+                   addr < (unsigned long)__hyp_idmap_text_end))
+                       return true;
+       }
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index 2b0a371b42af..24444ed456c8 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
+ void __init init_IRQ(void)
+ {
+       int i;
++      unsigned int order = get_order(IRQ_STACK_SIZE);
+ 
+       for (i = 0; i < NR_IRQS; i++)
+               irq_set_noprobe(i);
+@@ -62,8 +63,7 @@ void __init init_IRQ(void)
+       arch_init_irq();
+ 
+       for_each_possible_cpu(i) {
+-              int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+-              void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++              void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+ 
+               irq_stack[i] = s;
+               pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 1cc133e7026f..fffd031dc6b6 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -344,7 +344,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
+ static int get_frame_info(struct mips_frame_info *info)
+ {
+       bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+-      union mips_instruction insn, *ip, *ip_end;
++      union mips_instruction insn, *ip;
+       const unsigned int max_insns = 128;
+       unsigned int last_insn_size = 0;
+       unsigned int i;
+@@ -356,10 +356,9 @@ static int get_frame_info(struct mips_frame_info *info)
+       if (!ip)
+               goto err;
+ 
+-      ip_end = (void *)ip + info->func_size;
+-
+-      for (i = 0; i < max_insns && ip < ip_end; i++) {
++      for (i = 0; i < max_insns; i++) {
+               ip = (void *)ip + last_insn_size;
++
+               if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+                       insn.halfword[0] = 0;
+                       insn.halfword[1] = ip->halfword[0];
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index ab04751a12b6..1e9f610d36a4 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1942,7 +1942,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
+  */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+-      kfree(cpuc->shared_regs);
++      intel_cpuc_finish(cpuc);
+       kfree(cpuc);
+ }
+ 
+@@ -1954,14 +1954,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
+       cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+       if (!cpuc)
+               return ERR_PTR(-ENOMEM);
+-
+-      /* only needed, if we have extra_regs */
+-      if (x86_pmu.extra_regs) {
+-              cpuc->shared_regs = allocate_shared_regs(cpu);
+-              if (!cpuc->shared_regs)
+-                      goto error;
+-      }
+       cpuc->is_fake = 1;
++
++      if (intel_cpuc_prepare(cpuc, cpu))
++              goto error;
++
+       return cpuc;
+ error:
+       free_fake_cpuc(cpuc);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f0639c8ebcb6..098ab775135f 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2492,6 +2492,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+       raw_spin_unlock(&excl_cntrs->lock);
+ }
+ 
++static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++      WARN_ON_ONCE(!cpuc->constraint_list);
++
++      if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++              struct event_constraint *cx;
++
++              /*
++               * grab pre-allocated constraint entry
++               */
++              cx = &cpuc->constraint_list[idx];
++
++              /*
++               * initialize dynamic constraint
++               * with static constraint
++               */
++              *cx = *c;
++
++              /*
++               * mark constraint as dynamic
++               */
++              cx->flags |= PERF_X86_EVENT_DYNAMIC;
++              c = cx;
++      }
++
++      return c;
++}
++
+ static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+                          int idx, struct event_constraint *c)
+@@ -2522,27 +2551,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+        * only needed when constraint has not yet
+        * been cloned (marked dynamic)
+        */
+-      if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+-              struct event_constraint *cx;
+-
+-              /*
+-               * grab pre-allocated constraint entry
+-               */
+-              cx = &cpuc->constraint_list[idx];
+-
+-              /*
+-               * initialize dynamic constraint
+-               * with static constraint
+-               */
+-              *cx = *c;
+-
+-              /*
+-               * mark constraint as dynamic, so we
+-               * can free it later on
+-               */
+-              cx->flags |= PERF_X86_EVENT_DYNAMIC;
+-              c = cx;
+-      }
++      c = dyn_constraint(cpuc, c, idx);
+ 
+       /*
+        * From here on, the constraint is dynamic.
+@@ -3093,7 +3102,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
+       return x86_event_sysfs_show(page, config, event);
+ }
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+       struct intel_shared_regs *regs;
+       int i;
+@@ -3125,10 +3134,9 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+       return c;
+ }
+ 
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ 
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+       if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+               cpuc->shared_regs = allocate_shared_regs(cpu);
+               if (!cpuc->shared_regs)
+@@ -3138,7 +3146,7 @@ static int intel_pmu_cpu_prepare(int cpu)
+       if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+               size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+ 
+-              cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++              cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+               if (!cpuc->constraint_list)
+                       goto err_shared_regs;
+ 
+@@ -3163,6 +3171,11 @@ err:
+       return -ENOMEM;
+ }
+ 
++static int intel_pmu_cpu_prepare(int cpu)
++{
++      return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void intel_pmu_cpu_starting(int cpu)
+ {
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+@@ -3218,9 +3231,8 @@ static void intel_pmu_cpu_starting(int cpu)
+       }
+ }
+ 
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct intel_excl_cntrs *c;
+ 
+       c = cpuc->excl_cntrs;
+@@ -3238,9 +3250,8 @@ static void intel_pmu_cpu_dying(int cpu)
+       fini_debug_store_on_cpu(cpu);
+ }
+ 
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct intel_shared_regs *pc;
+ 
+       pc = cpuc->shared_regs;
+@@ -3250,7 +3261,12 @@ static void intel_pmu_cpu_dead(int cpu)
+               cpuc->shared_regs = NULL;
+       }
+ 
+-      free_excl_cntrs(cpu);
++      free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++      intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+ 
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -4132,7 +4148,7 @@ static __init int fixup_ht_bug(void)
+       get_online_cpus();
+ 
+       for_each_online_cpu(c) {
+-              free_excl_cntrs(c);
++              free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+       }
+ 
+       put_online_cpus();
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 5c21680b0a69..1ce6ae35f6a2 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -865,7 +865,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event);
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+ 
+ int intel_pmu_init(void);
+ 
+@@ -995,9 +996,13 @@ static inline int intel_pmu_init(void)
+       return 0;
+ }
+ 
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++{
++      return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+ {
+-      return NULL;
+ }
+ 
+ static inline int is_ht_workaround_enabled(void)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index c56c24347f15..98444b77fbe3 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -314,6 +314,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW     (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS     (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT   (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG           (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL         (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP               (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index bbbb9b14ade1..9963e21ac443 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -575,6 +575,12 @@
+ 
+ #define MSR_IA32_TSC_DEADLINE         0x000006E0
+ 
++
++#define MSR_TSX_FORCE_ABORT           0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT   0
++#define MSR_TFA_RTM_FORCE_ABORT               BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX              0x00000180
+ #define MSR_IA32_MCG_EBX              0x00000181
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 9215e0527647..390fdd39e0e2 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -6,7 +6,11 @@
+ #endif
+ 
+ #ifdef CONFIG_KASAN
++#ifdef CONFIG_KASAN_EXTRA
++#define KASAN_STACK_ORDER 2
++#else
+ #define KASAN_STACK_ORDER 1
++#endif
+ #else
+ #define KASAN_STACK_ORDER 0
+ #endif
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 4c2648b96c9a..be6d0543e626 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -765,11 +765,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+       set_cpu_cap(c, X86_FEATURE_ZEN);
+-      /*
+-       * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+-       * all up to and including B1.
+-       */
+-      if (c->x86_model <= 1 && c->x86_stepping <= 1)
++
++      /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
++      if (!cpu_has(c, X86_FEATURE_CPB))
+               set_cpu_cap(c, X86_FEATURE_CPB);
+ }
+ 
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 490f9be3fda2..167ecc270ca5 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
+       struct efi_info *current_ei = &boot_params.efi_info;
+       struct efi_info *ei = &params->efi_info;
+ 
++      if (!efi_enabled(EFI_RUNTIME_SERVICES))
++              return 0;
++
+       if (!current_ei->efi_memmap_size)
+               return 0;
+ 
+diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
+index 14e3ca353ac8..5035b86a2e49 100644
+--- a/arch/xtensa/configs/smp_lx200_defconfig
++++ b/arch/xtensa/configs/smp_lx200_defconfig
+@@ -34,6 +34,7 @@ CONFIG_SMP=y
+ CONFIG_HOTPLUG_CPU=y
+ # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+ # CONFIG_PCI is not set
++CONFIG_VECTORS_OFFSET=0x00002000
+ CONFIG_XTENSA_PLATFORM_XTFPGA=y
+ CONFIG_CMDLINE_BOOL=y
+ CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 27c8e07ace43..29f445b410b3 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -281,12 +281,13 @@ should_never_return:
+ 
+       movi    a2, cpu_start_ccount
+ 1:
++      memw
+       l32i    a3, a2, 0
+       beqi    a3, 0, 1b
+       movi    a3, 0
+       s32i    a3, a2, 0
+-      memw
+ 1:
++      memw
+       l32i    a3, a2, 0
+       beqi    a3, 0, 1b
+       wsr     a3, ccount
+@@ -323,11 +324,13 @@ ENTRY(cpu_restart)
+       rsr     a0, prid
+       neg     a2, a0
+       movi    a3, cpu_start_id
++      memw
+       s32i    a2, a3, 0
+ #if XCHAL_DCACHE_IS_WRITEBACK
+       dhwbi   a3, 0
+ #endif
+ 1:
++      memw
+       l32i    a2, a3, 0
+       dhi     a3, 0
+       bne     a2, a0, 1b
+diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
+index fc4ad21a5ed4..44805673a250 100644
+--- a/arch/xtensa/kernel/smp.c
++++ b/arch/xtensa/kernel/smp.c
+@@ -80,7 +80,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+       unsigned i;
+ 
+-      for (i = 0; i < max_cpus; ++i)
++      for_each_possible_cpu(i)
+               set_cpu_present(i, true);
+ }
+ 
+@@ -93,6 +93,11 @@ void __init smp_init_cpus(void)
+       pr_info("%s: Core Count = %d\n", __func__, ncpus);
+       pr_info("%s: Core Id = %d\n", __func__, core_id);
+ 
++      if (ncpus > NR_CPUS) {
++              ncpus = NR_CPUS;
++              pr_info("%s: limiting core count by %d\n", __func__, ncpus);
++      }
++
+       for (i = 0; i < ncpus; ++i)
+               set_cpu_possible(i, true);
+ }
+@@ -192,9 +197,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+       int i;
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+-      cpu_start_id = cpu;
+-      system_flush_invalidate_dcache_range(
+-                      (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
++      WRITE_ONCE(cpu_start_id, cpu);
++      /* Pairs with the third memw in the cpu_restart */
++      mb();
++      system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
++                                           sizeof(cpu_start_id));
+ #endif
+       smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+ 
+@@ -203,18 +210,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+                       ccount = get_ccount();
+               while (!ccount);
+ 
+-              cpu_start_ccount = ccount;
++              WRITE_ONCE(cpu_start_ccount, ccount);
+ 
+-              while (time_before(jiffies, timeout)) {
++              do {
++                      /*
++                       * Pairs with the first two memws in the
++                       * .Lboot_secondary.
++                       */
+                       mb();
+-                      if (!cpu_start_ccount)
+-                              break;
+-              }
++                      ccount = READ_ONCE(cpu_start_ccount);
++              } while (ccount && time_before(jiffies, timeout));
+ 
+-              if (cpu_start_ccount) {
++              if (ccount) {
+                       smp_call_function_single(0, mx_cpu_stop,
+-                                      (void *)cpu, 1);
+-                      cpu_start_ccount = 0;
++                                               (void *)cpu, 1);
++                      WRITE_ONCE(cpu_start_ccount, 0);
+                       return -EIO;
+               }
+       }
+@@ -234,6 +244,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+       pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+                       __func__, cpu, idle, start_info.stack);
+ 
++      init_completion(&cpu_running);
+       ret = boot_secondary(cpu, idle);
+       if (ret == 0) {
+               wait_for_completion_timeout(&cpu_running,
+@@ -295,8 +306,10 @@ void __cpu_die(unsigned int cpu)
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+       while (time_before(jiffies, timeout)) {
+               system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+-                              sizeof(cpu_start_id));
+-              if (cpu_start_id == -cpu) {
++                                             sizeof(cpu_start_id));
++              /* Pairs with the second memw in the cpu_restart */
++              mb();
++              if (READ_ONCE(cpu_start_id) == -cpu) {
+                       platform_cpu_kill(cpu);
+                       return;
+               }
+diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
+index be81e69b25bc..2251a6e0973a 100644
+--- a/arch/xtensa/kernel/time.c
++++ b/arch/xtensa/kernel/time.c
+@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
+               container_of(evt, struct ccount_timer, evt);
+ 
+       if (timer->irq_enabled) {
+-              disable_irq(evt->irq);
++              disable_irq_nosync(evt->irq);
+               timer->irq_enabled = 0;
+       }
+       return 0;
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 14790304b84b..9fcd51095d13 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/io.h>
+ #include <asm/uaccess.h>
+@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
+       TicCard = st_loc.tic_des_from_pc;       /* tic number to send            */
+       IndexCard = NumCard - 1;
+ 
+-      if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
++      if (IndexCard >= MAX_BOARD)
++              return -EINVAL;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (!apbs[IndexCard].RamIO)
+               return -EINVAL;
+ 
+ #ifdef DEBUG
+@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       unsigned char IndexCard;
+       void __iomem *pmem;
+       int ret = 0;
++      static int warncount = 10;
+       volatile unsigned char byte_reset_it;
+       struct st_ram_io *adgl;
+       void __user *argp = (void __user *)arg;
+@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       mutex_lock(&ac_mutex);  
+       IndexCard = adgl->num_card-1;
+        
+-      if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+-              static int warncount = 10;
+-              if (warncount) {
+-                      printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+-                      warncount--;
+-              }
+-              kfree(adgl);
+-              mutex_unlock(&ac_mutex);
+-              return -EINVAL;
+-      }
++      if (cmd != 6 && IndexCard >= MAX_BOARD)
++              goto err;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (cmd != 6 && !apbs[IndexCard].RamIO)
++              goto err;
+ 
+       switch (cmd) {
+               
+@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       kfree(adgl);
+       mutex_unlock(&ac_mutex);
+       return 0;
++
++err:
++      if (warncount) {
++              pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
++                      (int)IndexCard + 1);
++              warncount--;
++      }
++      kfree(adgl);
++      mutex_unlock(&ac_mutex);
++      return -EINVAL;
++
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 61fe4bbc6dc0..a38a23f0b3f4 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -528,13 +528,13 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-                               struct attribute *attr, char *buf)
++                        struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+ 
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+-                                const char *buf, size_t count)
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++                         const char *buf, size_t count)
+ {
+       int ret, enable;
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index a59ae8e24d3d..f690085b1ad9 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -659,13 +659,13 @@ static void __init intel_pstate_debug_expose_params(void)
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)                                   \
+       static ssize_t show_##file_name                                 \
+-      (struct kobject *kobj, struct attribute *attr, char *buf)       \
++      (struct kobject *kobj, struct kobj_attribute *attr, char *buf)  \
+       {                                                               \
+               return sprintf(buf, "%u\n", limits->object);            \
+       }
+ 
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total, no_turbo, turbo_pct;
+@@ -681,7 +681,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total;
+@@ -692,7 +692,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-                           struct attribute *attr, char *buf)
++                           struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+ 
+@@ -705,7 +705,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
+       return ret;
+ }
+ 
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+                             const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -729,7 +729,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+       return count;
+ }
+ 
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -753,7 +753,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+       return count;
+ }
+ 
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index ee7b48d5243c..b222dd7afe8e 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -203,6 +203,7 @@ struct at_xdmac_chan {
+       u32                             save_cim;
+       u32                             save_cnda;
+       u32                             save_cndc;
++      u32                             irq_status;
+       unsigned long                   status;
+       struct tasklet_struct           tasklet;
+       struct dma_slave_config         sconfig;
+@@ -1582,8 +1583,8 @@ static void at_xdmac_tasklet(unsigned long data)
+       struct at_xdmac_desc    *desc;
+       u32                     error_mask;
+ 
+-      dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+-               __func__, atchan->status);
++      dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
++              __func__, atchan->irq_status);
+ 
+       error_mask = AT_XDMAC_CIS_RBEIS
+                    | AT_XDMAC_CIS_WBEIS
+@@ -1591,15 +1592,15 @@ static void at_xdmac_tasklet(unsigned long data)
+ 
+       if (at_xdmac_chan_is_cyclic(atchan)) {
+               at_xdmac_handle_cyclic(atchan);
+-      } else if ((atchan->status & AT_XDMAC_CIS_LIS)
+-                 || (atchan->status & error_mask)) {
++      } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
++                 || (atchan->irq_status & error_mask)) {
+               struct dma_async_tx_descriptor  *txd;
+ 
+-              if (atchan->status & AT_XDMAC_CIS_RBEIS)
++              if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+                       dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+-              if (atchan->status & AT_XDMAC_CIS_WBEIS)
++              if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+                       dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+-              if (atchan->status & AT_XDMAC_CIS_ROIS)
++              if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+                       dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+ 
+               spin_lock_bh(&atchan->lock);
+@@ -1654,7 +1655,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+                       atchan = &atxdmac->chan[i];
+                       chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+                       chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+-                      atchan->status = chan_status & chan_imr;
++                      atchan->irq_status = chan_status & chan_imr;
+                       dev_vdbg(atxdmac->dma.dev,
+                                "%s: chan%d: imr=0x%x, status=0x%x\n",
+                                __func__, i, chan_imr, chan_status);
+@@ -1668,7 +1669,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+                                at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+                                at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+ 
+-                      if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
++                      if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+                               at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ 
+                       tasklet_schedule(&atchan->tasklet);
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index ebe72a466587..7dd46cf5ed84 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -583,11 +583,9 @@ static int dmatest_func(void *data)
+                       srcs[i] = um->addr[i] + src_off;
+                       ret = dma_mapping_error(dev->dev, um->addr[i]);
+                       if (ret) {
+-                              dmaengine_unmap_put(um);
+                               result("src mapping error", total_tests,
+                                      src_off, dst_off, len, ret);
+-                              failed_tests++;
+-                              continue;
++                              goto error_unmap_continue;
+                       }
+                       um->to_cnt++;
+               }
+@@ -602,11 +600,9 @@ static int dmatest_func(void *data)
+                                              DMA_BIDIRECTIONAL);
+                       ret = dma_mapping_error(dev->dev, dsts[i]);
+                       if (ret) {
+-                              dmaengine_unmap_put(um);
+                               result("dst mapping error", total_tests,
+                                      src_off, dst_off, len, ret);
+-                              failed_tests++;
+-                              continue;
++                              goto error_unmap_continue;
+                       }
+                       um->bidi_cnt++;
+               }
+@@ -643,12 +639,10 @@ static int dmatest_func(void *data)
+               }
+ 
+               if (!tx) {
+-                      dmaengine_unmap_put(um);
+                       result("prep error", total_tests, src_off,
+                              dst_off, len, ret);
+                       msleep(100);
+-                      failed_tests++;
+-                      continue;
++                      goto error_unmap_continue;
+               }
+ 
+               done->done = false;
+@@ -657,12 +651,10 @@ static int dmatest_func(void *data)
+               cookie = tx->tx_submit(tx);
+ 
+               if (dma_submit_error(cookie)) {
+-                      dmaengine_unmap_put(um);
+                       result("submit error", total_tests, src_off,
+                              dst_off, len, ret);
+                       msleep(100);
+-                      failed_tests++;
+-                      continue;
++                      goto error_unmap_continue;
+               }
+               dma_async_issue_pending(chan);
+ 
+@@ -675,16 +667,14 @@ static int dmatest_func(void *data)
+                       dmaengine_unmap_put(um);
+                       result("test timed out", total_tests, src_off, dst_off,
+                              len, 0);
+-                      failed_tests++;
+-                      continue;
++                      goto error_unmap_continue;
+               } else if (status != DMA_COMPLETE) {
+                       dmaengine_unmap_put(um);
+                       result(status == DMA_ERROR ?
+                              "completion error status" :
+                              "completion busy status", total_tests, src_off,
+                              dst_off, len, ret);
+-                      failed_tests++;
+-                      continue;
++                      goto error_unmap_continue;
+               }
+ 
+               dmaengine_unmap_put(um);
+@@ -727,6 +717,12 @@ static int dmatest_func(void *data)
+                       verbose_result("test passed", total_tests, src_off,
+                                      dst_off, len, 0);
+               }
++
++              continue;
++
++error_unmap_continue:
++              dmaengine_unmap_put(um);
++              failed_tests++;
+       }
+       ktime = ktime_sub(ktime_get(), ktime);
+       ktime = ktime_sub(ktime, comparetime);
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 14042a64bdd5..132b9bae4b6a 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
+       case ISCSI_BOOT_TGT_NIC_ASSOC:
+       case ISCSI_BOOT_TGT_CHAP_TYPE:
+               rc = S_IRUGO;
++              break;
+       case ISCSI_BOOT_TGT_NAME:
+               if (tgt->tgt_name_len)
+                       rc = S_IRUGO;
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 3edb09cb9ee0..1f599bc08237 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -221,6 +221,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+       struct vf610_gpio_port *port;
+       struct resource *iores;
+       struct gpio_chip *gc;
++      int i;
+       int ret;
+ 
+       port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+@@ -259,6 +260,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+       if (ret < 0)
+               return ret;
+ 
++      /* Mask all GPIO interrupts */
++      for (i = 0; i < gc->ngpio; i++)
++              vf610_gpio_writel(0, port->base + PORT_PCR(i));
++
+       /* Clear the interrupt status register for all GPIO's */
+       vf610_gpio_writel(~0, port->base + PORT_ISFR);
+ 
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index f2975a1525be..2796fea70a42 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -327,6 +327,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+               dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+               return PTR_ERR(tcon->sclk0);
+       }
++      clk_prepare_enable(tcon->sclk0);
+ 
+       if (tcon->quirks->has_channel_1) {
+               tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+@@ -341,6 +342,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+ 
+ static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+ {
++      clk_disable_unprepare(tcon->sclk0);
+       clk_disable_unprepare(tcon->clk);
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
+index 1a7ce1d740ce..292d7b6a0536 100644
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -772,7 +772,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
+           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+               wc.ex.imm_data = ohdr->u.ud.imm_data;
+               wc.wc_flags = IB_WC_WITH_IMM;
+-              tlen -= sizeof(u32);
+       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+               wc.ex.imm_data = 0;
+               wc.wc_flags = 0;
+diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
+index f45cad1198b0..93012fba287d 100644
+--- a/drivers/infiniband/hw/qib/qib_ud.c
++++ b/drivers/infiniband/hw/qib/qib_ud.c
+@@ -525,7 +525,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
+           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+               wc.ex.imm_data = ohdr->u.ud.imm_data;
+               wc.wc_flags = IB_WC_WITH_IMM;
+-              tlen -= sizeof(u32);
+       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+               wc.ex.imm_data = 0;
+               wc.wc_flags = 0;
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 25ce9047b682..16f5d5660053 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1241,6 +1241,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0000", 0 },
+       { "ELAN0100", 0 },
+       { "ELAN0600", 0 },
++      { "ELAN0601", 0 },
+       { "ELAN0602", 0 },
+       { "ELAN0605", 0 },
+       { "ELAN0608", 0 },
+diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
+index 20ab802461e7..1d46b763aae6 100644
+--- a/drivers/input/tablet/wacom_serial4.c
++++ b/drivers/input/tablet/wacom_serial4.c
+@@ -187,6 +187,7 @@ enum {
+       MODEL_DIGITIZER_II      = 0x5544, /* UD */
+       MODEL_GRAPHIRE          = 0x4554, /* ET */
+       MODEL_PENPARTNER        = 0x4354, /* CT */
++      MODEL_ARTPAD_II         = 0x4B54, /* KT */
+ };
+ 
+ static void wacom_handle_model_response(struct wacom *wacom)
+@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
+               wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
+               break;
+ 
++      case MODEL_ARTPAD_II:
+       case MODEL_DIGITIZER_II:
+               wacom->dev->name = "Wacom Digitizer II";
+               wacom->dev->id.version = MODEL_DIGITIZER_II;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index e984418ffa2a..ca22483d253f 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1896,6 +1896,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ 
+ static void do_detach(struct iommu_dev_data *dev_data)
+ {
++      struct protection_domain *domain = dev_data->domain;
+       struct amd_iommu *iommu;
+       u16 alias;
+ 
+@@ -1911,10 +1912,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
+       iommu = amd_iommu_rlookup_table[dev_data->devid];
+       alias = dev_data->alias;
+ 
+-      /* decrease reference counters */
+-      dev_data->domain->dev_iommu[iommu->index] -= 1;
+-      dev_data->domain->dev_cnt                 -= 1;
+-
+       /* Update data structures */
+       dev_data->domain = NULL;
+       list_del(&dev_data->list);
+@@ -1924,6 +1921,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ 
+       /* Flush the DTE entry */
+       device_flush_dte(dev_data);
++
++      /* Flush IOTLB */
++      domain_flush_tlb_pde(domain);
++
++      /* Wait for the flushes to finish */
++      domain_flush_complete(domain);
++
++      /* decrease reference counters - needs to happen after the flushes */
++      domain->dev_iommu[iommu->index] -= 1;
++      domain->dev_cnt                 -= 1;
+ }
+ 
+ /*
+@@ -2611,13 +2618,13 @@ out_unmap:
+                       bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
+                       iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
+ 
+-                      if (--mapped_pages)
++                      if (--mapped_pages == 0)
+                               goto out_free_iova;
+               }
+       }
+ 
+ out_free_iova:
+-      free_iova_fast(&dma_dom->iovad, address, npages);
++      free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
+ 
+ out_err:
+       return 0;
+diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
+index 013fc9659a84..2fe2bcb63a71 100644
+--- a/drivers/irqchip/irq-mmp.c
++++ b/drivers/irqchip/irq-mmp.c
+@@ -34,6 +34,9 @@
+ #define SEL_INT_PENDING               (1 << 6)
+ #define SEL_INT_NUM_MASK      0x3f
+ 
++#define MMP2_ICU_INT_ROUTE_PJ4_IRQ    (1 << 5)
++#define MMP2_ICU_INT_ROUTE_PJ4_FIQ    (1 << 6)
++
+ struct icu_chip_data {
+       int                     nr_irqs;
+       unsigned int            virq_base;
+@@ -190,7 +193,8 @@ static struct mmp_intc_conf mmp_conf = {
+ static struct mmp_intc_conf mmp2_conf = {
+       .conf_enable    = 0x20,
+       .conf_disable   = 0x0,
+-      .conf_mask      = 0x7f,
++      .conf_mask      = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
++                        MMP2_ICU_INT_ROUTE_PJ4_FIQ,
+ };
+ 
+ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
+diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
+index d4e0d1602c80..6b7eed722e43 100644
+--- a/drivers/isdn/i4l/isdn_tty.c
++++ b/drivers/isdn/i4l/isdn_tty.c
+@@ -786,7 +786,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
+               cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
+               cmd.parm.cmsg.para[4] = 0;
+               cmd.parm.cmsg.para[5] = l;
+-              strncpy(&cmd.parm.cmsg.para[6], id, l);
++              strscpy(&cmd.parm.cmsg.para[6], id, l);
+               cmd.command = CAPI_PUT_MESSAGE;
+               cmd.driver = info->isdn_driver;
+               cmd.arg = info->isdn_channel;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index cde43b63c3da..c630a9f8e356 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1019,11 +1019,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+                       return -EINVAL;
+               }
+ 
+-              /* Make sure the terminal type MSB is not null, otherwise it
+-               * could be confused with a unit.
++              /*
++               * Reject invalid terminal types that would cause issues:
++               *
++               * - The high byte must be non-zero, otherwise it would be
++               *   confused with a unit.
++               *
++               * - Bit 15 must be 0, as we use it internally as a terminal
++               *   direction flag.
++               *
++               * Other unknown types are accepted.
+                */
+               type = get_unaligned_le16(&buffer[4]);
+-              if ((type & 0xff00) == 0) {
++              if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
+                       uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
+                               "interface %d INPUT_TERMINAL %d has invalid "
+                               "type 0x%04x, skipping\n", udev->devnum,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 883fd9809dd2..7cee3e5db56c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -798,7 +798,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+               if (s->sizeof_stat == 8)
+                       _mv88e6xxx_stats_read(chip, s->reg + 1, &high);
+       }
+-      value = (((u64)high) << 16) | low;
++      value = (((u64)high) << 32) | low;
+       return value;
+ }
+ 
+diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
+index 0fb986ba3290..0ae723f75341 100644
+--- a/drivers/net/ethernet/altera/altera_msgdma.c
++++ b/drivers/net/ethernet/altera/altera_msgdma.c
+@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
+                       & 0xffff;
+ 
+       if (inuse) { /* Tx FIFO is not empty */
+-              ready = priv->tx_prod - priv->tx_cons - inuse - 1;
++              ready = max_t(int,
++                            priv->tx_prod - priv->tx_cons - inuse - 1, 0);
+       } else {
+               /* Check for buffered last packet */
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index a036f7039d76..737f0f6f4075 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -428,6 +428,12 @@ normal_tx:
+       }
+ 
+       length >>= 9;
++      if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
++              dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
++                                   skb->len);
++              i = 0;
++              goto tx_dma_error;
++      }
+       flags |= bnxt_lhint_arr[length];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index a2f7d0834071..ad8681cf5ef0 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -2078,6 +2078,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
+ out_notify_fail:
+       (void)cancel_work_sync(&priv->service_task);
+ out_read_prop_fail:
++      /* safe for ACPI FW */
++      of_node_put(to_of_node(priv->fwnode));
+       free_netdev(ndev);
+       return ret;
+ }
+@@ -2107,6 +2109,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
+       set_bit(NIC_STATE_REMOVING, &priv->state);
+       (void)cancel_work_sync(&priv->service_task);
+ 
++      /* safe for ACPI FW */
++      of_node_put(to_of_node(priv->fwnode));
++
+       free_netdev(ndev);
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
+index 501eb2090ca6..de23a0ead5d7 100644
+--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
+@@ -329,7 +329,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+               }
+ 
+               hns_mdio_cmd_write(mdio_dev, is_c45,
+-                                 MDIO_C45_WRITE_ADDR, phy_id, devad);
++                                 MDIO_C45_READ, phy_id, devad);
+       }
+ 
+       /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index af11781fe5f9..4ac023a37936 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -46,6 +46,7 @@
+ #include <linux/mii.h>
+ #include <linux/of_device.h>
+ #include <linux/of_net.h>
++#include <linux/dmi.h>
+ 
+ #include <asm/irq.h>
+ 
+@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
+ module_param(copybreak, int, 0);
+ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+ 
+-static int disable_msi = 0;
++static int disable_msi = -1;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+ 
+@@ -4923,6 +4924,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
+       return buf;
+ }
+ 
++static const struct dmi_system_id msi_blacklist[] = {
++      {
++              .ident = "Dell Inspiron 1545",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
++              },
++      },
++      {
++              .ident = "Gateway P-79",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
++              },
++      },
++      {}
++};
++
+ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+       struct net_device *dev, *dev1;
+@@ -5034,6 +5053,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+               goto err_out_free_pci;
+       }
+ 
++      if (disable_msi == -1)
++              disable_msi = !!dmi_check_system(msi_blacklist);
++
+       if (!disable_msi && pci_enable_msi(pdev) == 0) {
+               err = sky2_test_msi(hw);
+               if (err) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+index 9cc02b94328a..cf34908ec8e1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+@@ -158,6 +158,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+       struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+       struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+       struct vf_pf_resc_request *p_resc;
++      u8 retry_cnt = VF_ACQUIRE_THRESH;
+       bool resources_acquired = false;
+       struct vfpf_acquire_tlv *req;
+       int rc = 0, attempts = 0;
+@@ -203,6 +204,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+ 
+               /* send acquire request */
+               rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
++
++              /* Re-try acquire in case of vf-pf hw channel timeout */
++              if (retry_cnt && rc == -EBUSY) {
++                      DP_VERBOSE(p_hwfn, QED_MSG_IOV,
++                                 "VF retrying to acquire due to VPC timeout\n");
++                      retry_cnt--;
++                      continue;
++              }
++
+               if (rc)
+                       goto exit;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index d80c88bd2bba..6e61bccc90b3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -877,8 +877,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
+       }
+ 
+       ret = phy_power_on(bsp_priv, true);
+-      if (ret)
++      if (ret) {
++              gmac_clk_enable(bsp_priv, false);
+               return ret;
++      }
+ 
+       ret = gmac_clk_enable(bsp_priv, true);
+       if (ret)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 53602fdf5b47..06f77ec44a15 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -593,6 +593,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
+       schedule_delayed_work(&ndev_ctx->dwork, 0);
+ }
+ 
++static void netvsc_comp_ipcsum(struct sk_buff *skb)
++{
++      struct iphdr *iph = (struct iphdr *)skb->data;
++
++      iph->check = 0;
++      iph->check = ip_fast_csum(iph, iph->ihl);
++}
++
+ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+                               struct hv_netvsc_packet *packet,
+                               struct ndis_tcp_ip_checksum_info *csum_info,
+@@ -616,9 +624,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+       /* skb is already created with CHECKSUM_NONE */
+       skb_checksum_none_assert(skb);
+ 
+-      /*
+-       * In Linux, the IP checksum is always checked.
+-       * Do L4 checksum offload if enabled and present.
++      /* Incoming packets may have IP header checksum verified by the host.
++       * They may not have IP header checksum computed after coalescing.
++       * We compute it here if the flags are set, because on Linux, the IP
++       * checksum is always checked.
++       */
++      if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
++          csum_info->receive.ip_checksum_succeeded &&
++          skb->protocol == htons(ETH_P_IP))
++              netvsc_comp_ipcsum(skb);
++
++      /* Do L4 checksum offload if enabled and present.
+        */
+       if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+               if (csum_info->receive.tcp_checksum_succeeded ||
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 707190d3ada0..16f074408813 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -341,6 +341,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
+       return genphy_config_aneg(phydev);
+ }
+ 
++static int ksz8061_config_init(struct phy_device *phydev)
++{
++      int ret;
++
++      ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++      if (ret)
++              return ret;
++
++      return kszphy_config_init(phydev);
++}
++
+ static int ksz9021_load_values_from_of(struct phy_device *phydev,
+                                      const struct device_node *of_node,
+                                      u16 reg,
+@@ -940,7 +951,7 @@ static struct phy_driver ksphy_driver[] = {
+       .phy_id_mask    = MICREL_PHY_ID_MASK,
+       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+-      .config_init    = kszphy_config_init,
++      .config_init    = ksz8061_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .ack_interrupt  = kszphy_ack_interrupt,
+diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
+index b228bea7931f..de2530830d93 100644
+--- a/drivers/net/team/team_mode_loadbalance.c
++++ b/drivers/net/team/team_mode_loadbalance.c
+@@ -319,6 +319,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+       return 0;
+ }
+ 
++static void lb_bpf_func_free(struct team *team)
++{
++      struct lb_priv *lb_priv = get_lb_priv(team);
++      struct bpf_prog *fp;
++
++      if (!lb_priv->ex->orig_fprog)
++              return;
++
++      __fprog_destroy(lb_priv->ex->orig_fprog);
++      fp = rcu_dereference_protected(lb_priv->fp,
++                                     lockdep_is_held(&team->lock));
++      bpf_prog_destroy(fp);
++}
++
+ static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+ {
+       struct lb_priv *lb_priv = get_lb_priv(team);
+@@ -633,6 +647,7 @@ static void lb_exit(struct team *team)
+ 
+       team_options_unregister(team, lb_options,
+                               ARRAY_SIZE(lb_options));
++      lb_bpf_func_free(team);
+       cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+       free_percpu(lb_priv->pcpu_stats);
+       kfree(lb_priv->ex);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7a0d5e928bec..24cc94453d38 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1471,9 +1471,9 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+       }
+ 
+       add_wait_queue(&tfile->wq.wait, &wait);
+-      current->state = TASK_INTERRUPTIBLE;
+ 
+       while (1) {
++              set_current_state(TASK_INTERRUPTIBLE);
+               skb = skb_array_consume(&tfile->tx_array);
+               if (skb)
+                       break;
+@@ -1489,7 +1489,7 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+               schedule();
+       }
+ 
+-      current->state = TASK_RUNNING;
++      __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&tfile->wq.wait, &wait);
+ 
+ out:
+diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
+index 3b6fb5b3bdb2..6414cc6b9032 100644
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -435,6 +435,8 @@ void xenvif_init_hash(struct xenvif *vif)
+       if (xenvif_hash_cache_size == 0)
+               return;
+ 
++      BUG_ON(vif->hash.cache.count);
++
+       spin_lock_init(&vif->hash.cache.lock);
+       INIT_LIST_HEAD(&vif->hash.cache.list);
+ }
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 618013e7f87b..cae691486105 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -152,6 +152,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ {
+       struct xenvif *vif = netdev_priv(dev);
+       unsigned int size = vif->hash.size;
++      unsigned int num_queues;
++
++      /* If queues are not set up internally - always return 0
++       * as the packet going to be dropped anyway */
++      num_queues = READ_ONCE(vif->num_queues);
++      if (num_queues < 1)
++              return 0;
+ 
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               return fallback(dev, skb) % dev->real_num_tx_queues;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index a7bdb1ffac2e..f57815befc90 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1074,11 +1074,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
+               skb_frag_size_set(&frags[i], len);
+       }
+ 
+-      /* Copied all the bits from the frag list -- free it. */
+-      skb_frag_list_init(skb);
+-      xenvif_skb_zerocopy_prepare(queue, nskb);
+-      kfree_skb(nskb);
+-
+       /* Release all the original (foreign) frags. */
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               skb_frag_unref(skb, f);
+@@ -1147,6 +1142,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+               xenvif_fill_frags(queue, skb);
+ 
+               if (unlikely(skb_has_frag_list(skb))) {
++                      struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
++                      xenvif_skb_zerocopy_prepare(queue, nskb);
+                       if (xenvif_handle_frag_list(queue, skb)) {
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+@@ -1155,6 +1152,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+                               kfree_skb(skb);
+                               continue;
+                       }
++                      /* Copied all the bits from the frag list -- free it. */
++                      skb_frag_list_init(skb);
++                      kfree_skb(nskb);
+               }
+ 
+               skb->dev      = queue->vif->dev;
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index b8a21d7b25d4..1d81149c9ea4 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -945,6 +945,7 @@ config INTEL_OAKTRAIL
+ config SAMSUNG_Q10
+       tristate "Samsung Q10 Extras"
+       depends on ACPI
++      depends on BACKLIGHT_LCD_SUPPORT
+       select BACKLIGHT_CLASS_DEVICE
+       ---help---
+         This driver provides support for backlight control on Samsung Q10
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 8f77fc0630ce..86a02592b982 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2449,11 +2449,12 @@ out:
+       return rc;
+ }
+ 
+-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
++static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
+ {
+       if (!q)
+               return;
+ 
++      qeth_clear_outq_buffers(q, 1);
+       qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+       kfree(q);
+ }
+@@ -2526,10 +2527,8 @@ out_freeoutqbufs:
+               card->qdio.out_qs[i]->bufs[j] = NULL;
+       }
+ out_freeoutq:
+-      while (i > 0) {
+-              qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
+-              qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+-      }
++      while (i > 0)
++              qeth_free_output_queue(card->qdio.out_qs[--i]);
+       kfree(card->qdio.out_qs);
+       card->qdio.out_qs = NULL;
+ out_freepool:
+@@ -2562,10 +2561,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
+       qeth_free_buffer_pool(card);
+       /* free outbound qdio_qs */
+       if (card->qdio.out_qs) {
+-              for (i = 0; i < card->qdio.no_out_queues; ++i) {
+-                      qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+-                      qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
+-              }
++              for (i = 0; i < card->qdio.no_out_queues; i++)
++                      qeth_free_output_queue(card->qdio.out_qs[i]);
+               kfree(card->qdio.out_qs);
+               card->qdio.out_qs = NULL;
+       }
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index fe670b696251..2d9696b3d432 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1179,8 +1179,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+                                 ADD : DELETE;
+                               break;
+                       }
+-                      case AifBuManagerEvent:
+-                              aac_handle_aif_bu(dev, aifcmd);
++                      break;
++              case AifBuManagerEvent:
++                      aac_handle_aif_bu(dev, aifcmd);
+                       break;
+               }
+ 
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 50c71678a156..ae93f45f9cd8 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -1736,14 +1736,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+           fc_frame_payload_op(fp) != ELS_LS_ACC) {
+               FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
+               fc_lport_error(lport, fp);
+-              goto err;
++              goto out;
+       }
+ 
+       flp = fc_frame_payload_get(fp, sizeof(*flp));
+       if (!flp) {
+               FC_LPORT_DBG(lport, "FLOGI bad response\n");
+               fc_lport_error(lport, fp);
+-              goto err;
++              goto out;
+       }
+ 
+       mfs = ntohs(flp->fl_csp.sp_bb_data) &
+@@ -1753,7 +1753,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+               FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+                            "lport->mfs:%hu\n", mfs, lport->mfs);
+               fc_lport_error(lport, fp);
+-              goto err;
++              goto out;
+       }
+ 
+       if (mfs <= lport->mfs) {
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 2cc82ed6433a..91f5c951850f 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -1073,18 +1073,19 @@ static void qm_mr_process_task(struct work_struct *work);
+ static irqreturn_t portal_isr(int irq, void *ptr)
+ {
+       struct qman_portal *p = ptr;
+-
+-      u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+       u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
++      u32 clear = 0;
+ 
+       if (unlikely(!is))
+               return IRQ_NONE;
+ 
+       /* DQRR-handling if it's interrupt-driven */
+-      if (is & QM_PIRQ_DQRI)
++      if (is & QM_PIRQ_DQRI) {
+               __poll_portal_fast(p, QMAN_POLL_LIMIT);
++              clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
++      }
+       /* Handling of anything else that's interrupt-driven */
+-      clear |= __poll_portal_slow(p, is);
++      clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
+       qm_out(&p->p, QM_REG_ISR, clear);
+       return IRQ_HANDLED;
+ }
+diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
+index d270a424ecac..22c481f2ae4f 100644
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -307,10 +307,10 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+                                       bool cached)
+ {
+       int i;
+-      gfp_t gfp_flags = low_order_gfp_flags;
+ 
+       for (i = 0; i < NUM_ORDERS; i++) {
+               struct ion_page_pool *pool;
++              gfp_t gfp_flags = low_order_gfp_flags;
+ 
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
+index 0dcb826a9f1f..723bdd2c2c74 100644
+--- a/drivers/staging/comedi/drivers/ni_660x.c
++++ b/drivers/staging/comedi/drivers/ni_660x.c
+@@ -606,6 +606,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+       case NI_660X_PFI_OUTPUT_DIO:
+               if (chan > 31)
+                       return -EINVAL;
++              break;
+       default:
+               return -EINVAL;
+       }
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 2e5e3b368532..b7203867ea9d 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -1263,8 +1263,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
+               vif->wilc = *wilc;
+               vif->ndev = ndev;
+               wl->vif[i] = vif;
+-              wl->vif_num = i;
+-              vif->idx = wl->vif_num;
++              wl->vif_num = i + 1;
++              vif->idx = i;
+ 
+               ndev->netdev_ops = &wilc_netdev_ops;
+ 
+diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
+index 125cea1c3c8d..19ce615455c1 100644
+--- a/drivers/usb/phy/Kconfig
++++ b/drivers/usb/phy/Kconfig
+@@ -20,7 +20,7 @@ config AB8500_USB
+ 
+ config FSL_USB2_OTG
+       bool "Freescale USB OTG Transceiver Driver"
+-      depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
++      depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
+       select USB_PHY
+       help
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c2b120021443..7bbf2ca73f68 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+       { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+       { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++      { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+       { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+       { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+       { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 2e2f736384ab..b88a72220acd 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1020,6 +1020,8 @@ static const struct usb_device_id id_table_combined[] = {
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+       { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
++      /* EZPrototypes devices */
++      { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
+       { }                                     /* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 76a10b222ff9..ddf5ab983dc9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1307,6 +1307,12 @@
+ #define IONICS_VID                    0x1c0c
+ #define IONICS_PLUGCOMPUTER_PID               0x0102
+ 
++/*
++ * EZPrototypes (PID reseller)
++ */
++#define EZPROTOTYPES_VID              0x1c40
++#define HJELMSLUND_USB485_ISO_PID     0x0477
++
+ /*
+  * Dresden Elektronik Sensor Terminal Board
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7bc2c9fef605..b2b7c12e5c86 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1147,6 +1147,8 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+         .driver_info = NCTRL(0) | RSVD(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),    /* Telit ME910 (ECM) */
++        .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index d8e6d421c27f..2e1f50e467f1 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -563,7 +563,6 @@ int autofs4_expire_run(struct super_block *sb,
+       pkt.len = dentry->d_name.len;
+       memcpy(pkt.name, dentry->d_name.name, pkt.len);
+       pkt.name[pkt.len] = '\0';
+-      dput(dentry);
+ 
+       if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
+               ret = -EFAULT;
+@@ -576,6 +575,8 @@ int autofs4_expire_run(struct super_block *sb,
+       complete_all(&ino->expire_complete);
+       spin_unlock(&sbi->fs_lock);
+ 
++      dput(dentry);
++
+       return ret;
+ }
+ 
+diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
+index ce0c6ea96a87..d9a3264909d0 100644
+--- a/fs/autofs4/inode.c
++++ b/fs/autofs4/inode.c
+@@ -259,8 +259,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
+       }
+       root_inode = autofs4_get_inode(s, S_IFDIR | 0755);
+       root = d_make_root(root_inode);
+-      if (!root)
++      if (!root) {
++              ret = -ENOMEM;
+               goto fail_ino;
++      }
+       pipe = NULL;
+ 
+       root->d_fsdata = ino;
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 5d8f496d624e..e0d46d47e358 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -207,6 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+       struct buffer_head *head;
+       struct page *page;
+       int all_mapped = 1;
++      static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
+ 
+       index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+       page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
+@@ -234,15 +235,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+        * file io on the block device and getblk.  It gets dealt with
+        * elsewhere, don't buffer_error if we had some unmapped buffers
+        */
+-      if (all_mapped) {
+-              printk("__find_get_block_slow() failed. "
+-                      "block=%llu, b_blocknr=%llu\n",
+-                      (unsigned long long)block,
+-                      (unsigned long long)bh->b_blocknr);
+-              printk("b_state=0x%08lx, b_size=%zu\n",
+-                      bh->b_state, bh->b_size);
+-              printk("device %pg blocksize: %d\n", bdev,
+-                      1 << bd_inode->i_blkbits);
++      ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
++      if (all_mapped && __ratelimit(&last_warned)) {
++              printk("__find_get_block_slow() failed. block=%llu, "
++                     "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
++                     "device %pg blocksize: %d\n",
++                     (unsigned long long)block,
++                     (unsigned long long)bh->b_blocknr,
++                     bh->b_state, bh->b_size, bdev,
++                     1 << bd_inode->i_blkbits);
+       }
+ out_unlock:
+       spin_unlock(&bd_mapping->private_lock);
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 1e1449ad00e8..1af7afae3ad1 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -84,8 +84,8 @@
+ 
+ #define NUMBER_OF_SMB2_COMMANDS       0x0013
+ 
+-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+-#define MAX_SMB2_HDR_SIZE 0x00b0
++/* 52 transform hdr + 64 hdr + 88 create rsp */
++#define MAX_SMB2_HDR_SIZE 204
+ 
+ #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+ #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
+diff --git a/fs/drop_caches.c b/fs/drop_caches.c
+index d72d52b90433..280460fef066 100644
+--- a/fs/drop_caches.c
++++ b/fs/drop_caches.c
+@@ -20,8 +20,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
+       spin_lock(&sb->s_inode_list_lock);
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+               spin_lock(&inode->i_lock);
++              /*
++               * We must skip inodes in unusual state. We may also skip
++               * inodes without pages but we deliberately won't in case
++               * we need to reschedule to avoid softlockups.
++               */
+               if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+-                  (inode->i_mapping->nrpages == 0)) {
++                  (inode->i_mapping->nrpages == 0 && !need_resched())) {
+                       spin_unlock(&inode->i_lock);
+                       continue;
+               }
+@@ -29,6 +34,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
+               spin_unlock(&inode->i_lock);
+               spin_unlock(&sb->s_inode_list_lock);
+ 
++              cond_resched();
+               invalidate_mapping_pages(inode->i_mapping, 0, -1);
+               iput(toput_inode);
+               toput_inode = inode;
+diff --git a/fs/exec.c b/fs/exec.c
+index fcd8642ef2d2..81477116035d 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -938,7 +938,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+                                   i_size - pos);
+               if (bytes < 0) {
+                       ret = bytes;
+-                      goto out;
++                      goto out_free;
+               }
+ 
+               if (bytes == 0)
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index f53c139c312e..001487b230b5 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -861,6 +861,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
+       rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+       if (rc != MIGRATEPAGE_SUCCESS)
+               return rc;
++
++      /*
++       * page_private is subpool pointer in hugetlb pages.  Transfer to
++       * new page.  PagePrivate is not associated with page_private for
++       * hugetlb pages and can not be set here as only page_huge_active
++       * pages can be migrated.
++       */
++      if (page_private(page)) {
++              set_page_private(newpage, page_private(page));
++              set_page_private(page, 0);
++      }
++
+       migrate_page_copy(newpage, page);
+ 
+       return MIGRATEPAGE_SUCCESS;
+diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
+index 0a3f9b594602..37779ed3f790 100644
+--- a/fs/ncpfs/ioctl.c
++++ b/fs/ncpfs/ioctl.c
+@@ -233,7 +233,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
+               len = strlen(server->nls_vol->charset);
+               if (len > NCP_IOCSNAME_LEN)
+                       len = NCP_IOCSNAME_LEN;
+-              strncpy(user.codepage, server->nls_vol->charset, len);
++              strscpy(user.codepage, server->nls_vol->charset, NCP_IOCSNAME_LEN);
+               user.codepage[len] = 0;
+       }
+ 
+@@ -243,7 +243,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
+               len = strlen(server->nls_io->charset);
+               if (len > NCP_IOCSNAME_LEN)
+                       len = NCP_IOCSNAME_LEN;
+-              strncpy(user.iocharset, server->nls_io->charset, len);
++              strscpy(user.iocharset, server->nls_io->charset, NCP_IOCSNAME_LEN);
+               user.iocharset[len] = 0;
+       }
+       mutex_unlock(&server->root_setup_lock);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 35aef192a13f..659ad12e33ba 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1904,6 +1904,11 @@ static int nfs_parse_devname(const char *dev_name,
+       size_t len;
+       char *end;
+ 
++      if (unlikely(!dev_name || !*dev_name)) {
++              dfprintk(MOUNT, "NFS: device name not specified\n");
++              return -EINVAL;
++      }
++
+       /* Is the host name protected with square brakcets? */
+       if (*dev_name == '[') {
+               end = strchr(++dev_name, ']');
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index cebecff536a3..c5fb6f871930 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -41,6 +41,24 @@ static inline bool drm_arch_can_wc_memory(void)
+       return false;
+ #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+       return false;
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++      /*
++       * The DRM driver stack is designed to work with cache coherent devices
++       * only, but permits an optimization to be enabled in some cases, where
++       * for some buffers, both the CPU and the GPU use uncached mappings,
++       * removing the need for DMA snooping and allocation in the CPU caches.
++       *
++       * The use of uncached GPU mappings relies on the correct implementation
++       * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
++       * will use cached mappings nonetheless. On x86 platforms, this does not
++       * seem to matter, as uncached CPU mappings will snoop the caches in any
++       * case. However, on ARM and arm64, enabling this optimization on a
++       * platform where NoSnoop is ignored results in loss of coherency, which
++       * breaks correct operation of the device. Since we have no way of
++       * detecting whether NoSnoop works or not, just disable this
++       * optimization entirely for ARM and arm64.
++       */
++      return false;
+ #else
+       return true;
+ #endif
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 32dc0cbd51ca..9d9e0b54f831 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -234,20 +234,12 @@ __ATTR(_name, _perm, show_##_name, NULL)
+ static struct freq_attr _name =                       \
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+-struct global_attr {
+-      struct attribute attr;
+-      ssize_t (*show)(struct kobject *kobj,
+-                      struct attribute *attr, char *buf);
+-      ssize_t (*store)(struct kobject *a, struct attribute *b,
+-                       const char *c, size_t count);
+-};
+-
+ #define define_one_global_ro(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define define_one_global_rw(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+ 
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 3ef2743a8eec..8665bf24e3b7 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -22,6 +22,7 @@
+ 
+ #include <net/inet_sock.h>
+ #include <net/snmp.h>
++#include <net/ip.h>
+ 
+ struct icmp_err {
+   int         errno;
+@@ -39,7 +40,13 @@ struct net_proto_family;
+ struct sk_buff;
+ struct net;
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++               const struct ip_options *opt);
++static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++      __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
++}
++
+ int icmp_rcv(struct sk_buff *skb);
+ void icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 8646da034851..f06cd30bb44c 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -570,6 +570,8 @@ static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+ }
+ 
+ void ip_options_fragment(struct sk_buff *skb);
++int __ip_options_compile(struct net *net, struct ip_options *opt,
++                       struct sk_buff *skb, __be32 *info);
+ int ip_options_compile(struct net *net, struct ip_options *opt,
+                      struct sk_buff *skb);
+ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 17339506f9f8..5cbb2eda80b5 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -428,18 +428,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+ {
+-      int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-
+-      if (ret || !write)
+-              return ret;
+-
++      int ret;
++      int perf_cpu = sysctl_perf_cpu_time_max_percent;
+       /*
+        * If throttling is disabled don't allow the write:
+        */
+-      if (sysctl_perf_cpu_time_max_percent == 100 ||
+-          sysctl_perf_cpu_time_max_percent == 0)
++      if (write && (perf_cpu == 100 || perf_cpu == 0))
+               return -EINVAL;
+ 
++      ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++      if (ret || !write)
++              return ret;
++
+       max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+       perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
+       update_perf_cpu_limits();
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 053d7be08be5..30fe0432c46d 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2966,10 +2966,13 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+                */
+               WARN_ON(!q.pi_state);
+               pi_mutex = &q.pi_state->pi_mutex;
+-              ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
+-              debug_rt_mutex_free_waiter(&rt_waiter);
++              ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+ 
+               spin_lock(q.lock_ptr);
++              if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
++                      ret = 0;
++
++              debug_rt_mutex_free_waiter(&rt_waiter);
+               /*
+                * Fixup the pi_state owner and possibly acquire the lock if we
+                * haven't already.
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 196cc460e38d..7615e7722258 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1746,21 +1746,23 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
+ }
+ 
+ /**
+- * rt_mutex_finish_proxy_lock() - Complete lock acquisition
++ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
+  * @lock:             the rt_mutex we were woken on
+  * @to:                       the timeout, null if none. hrtimer should already have
+  *                    been started.
+  * @waiter:           the pre-initialized rt_mutex_waiter
+  *
+- * Complete the lock acquisition started our behalf by another thread.
++ * Wait for the the lock acquisition started on our behalf by
++ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
++ * rt_mutex_cleanup_proxy_lock().
+  *
+  * Returns:
+  *  0 - success
+  * <0 - error, one of -EINTR, -ETIMEDOUT
+  *
+- * Special API call for PI-futex requeue support
++ * Special API call for PI-futex support
+  */
+-int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
++int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+                              struct hrtimer_sleeper *to,
+                              struct rt_mutex_waiter *waiter)
+ {
+@@ -1773,9 +1775,6 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+       /* sleep on the mutex */
+       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+ 
+-      if (unlikely(ret))
+-              remove_waiter(lock, waiter);
+-
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+@@ -1786,3 +1785,42 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ 
+       return ret;
+ }
++
++/**
++ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
++ * @lock:             the rt_mutex we were woken on
++ * @waiter:           the pre-initialized rt_mutex_waiter
++ *
++ * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
++ *
++ * Unless we acquired the lock; we're still enqueued on the wait-list and can
++ * in fact still be granted ownership until we're removed. Therefore we can
++ * find we are in fact the owner and must disregard the
++ * rt_mutex_wait_proxy_lock() failure.
++ *
++ * Returns:
++ *  true  - did the cleanup, we done.
++ *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
++ *          caller should disregards its return value.
++ *
++ * Special API call for PI-futex support
++ */
++bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
++                               struct rt_mutex_waiter *waiter)
++{
++      bool cleanup = false;
++
++      raw_spin_lock_irq(&lock->wait_lock);
++      /*
++       * Unless we're the owner; we're still enqueued on the wait_list.
++       * So check if we became owner, if not, take us off the wait_list.
++       */
++      if (rt_mutex_owner(lock) != current) {
++              remove_waiter(lock, waiter);
++              fixup_rt_mutex_waiters(lock);
++              cleanup = true;
++      }
++      raw_spin_unlock_irq(&lock->wait_lock);
++
++      return cleanup;
++}
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 50848b460851..14cbafed0014 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -107,9 +107,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+                                    struct rt_mutex_waiter *waiter,
+                                    struct task_struct *task);
+-extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+-                                    struct hrtimer_sleeper *to,
+-                                    struct rt_mutex_waiter *waiter);
++extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
++                             struct hrtimer_sleeper *to,
++                             struct rt_mutex_waiter *waiter);
++extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
++                               struct rt_mutex_waiter *waiter);
+ extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+ extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+                                 struct wake_q_head *wqh);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3e50fcfe6ad8..8b682da98d95 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3579,7 +3579,6 @@ retry_avoidcopy:
+       copy_user_huge_page(new_page, old_page, address, vma,
+                           pages_per_huge_page(h));
+       __SetPageUptodate(new_page);
+-      set_page_huge_active(new_page);
+ 
+       mmun_start = address & huge_page_mask(h);
+       mmun_end = mmun_start + huge_page_size(h);
+@@ -3601,6 +3600,7 @@ retry_avoidcopy:
+                               make_huge_pte(vma, new_page, 1));
+               page_remove_rmap(old_page, true);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
++              set_page_huge_active(new_page);
+               /* Make the old page be freed below */
+               new_page = old_page;
+       }
+@@ -3683,6 +3683,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       struct page *page;
+       pte_t new_pte;
+       spinlock_t *ptl;
++      bool new_page = false;
+ 
+       /*
+        * Currently, we are forced to kill the process in the event the
+@@ -3716,7 +3717,7 @@ retry:
+               }
+               clear_huge_page(page, address, pages_per_huge_page(h));
+               __SetPageUptodate(page);
+-              set_page_huge_active(page);
++              new_page = true;
+ 
+               if (vma->vm_flags & VM_MAYSHARE) {
+                       int err = huge_add_to_page_cache(page, mapping, idx);
+@@ -3788,6 +3789,15 @@ retry:
+       }
+ 
+       spin_unlock(ptl);
++
++      /*
++       * Only make newly allocated pages active.  Existing pages found
++       * in the pagecache could be !page_huge_active() if they have been
++       * isolated for migration.
++       */
++      if (new_page)
++              set_page_huge_active(page);
++
+       unlock_page(page);
+ out:
+       return ret;
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index e4c271298074..b4c8d7b9ab82 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1471,7 +1471,8 @@ static struct page *next_active_pageblock(struct page *page)
+ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+ {
+       struct page *page = pfn_to_page(start_pfn);
+-      struct page *end_page = page + nr_pages;
++      unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
++      struct page *end_page = pfn_to_page(end_pfn);
+ 
+       /* Check the starting page of each pageblock within the range */
+       for (; page < end_page; page = next_active_pageblock(page)) {
+@@ -1511,6 +1512,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                               i++;
+                       if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
+                               continue;
++                      /* Check if we got outside of the zone */
++                      if (zone && !zone_spans_pfn(zone, pfn + i))
++                              return 0;
+                       page = pfn_to_page(pfn + i);
+                       if (zone && page_zone(page) != zone)
+                               return 0;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index b08c1a4a1c22..b810ac1359f0 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1234,6 +1234,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+               lock_page(hpage);
+       }
+ 
++      /*
++       * Check for pages which are in the process of being freed.  Without
++       * page_mapping() set, hugetlbfs specific move page routine will not
++       * be called and we could leak usage counts for subpools.
++       */
++      if (page_private(hpage) && !page_mapping(hpage)) {
++              rc = -EBUSY;
++              goto out_unlock;
++      }
++
+       if (PageAnon(hpage))
+               anon_vma = page_get_anon_vma(hpage);
+ 
+@@ -1265,6 +1275,7 @@ put_anon:
+               set_page_owner_migrate_reason(new_hpage, reason);
+       }
+ 
++out_unlock:
+       unlock_page(hpage);
+ out:
+       if (rc != -EAGAIN)
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 6e4f34721080..3333693d8052 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1380,6 +1380,9 @@ static int register_queue_kobjects(struct net_device *dev)
+ error:
+       netdev_queue_update_kobjects(dev, txq, 0);
+       net_rx_queue_update_kobjects(dev, rxq, 0);
++#ifdef CONFIG_SYSFS
++      kset_unregister(dev->queues_kset);
++#endif
+       return error;
+ }
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 571d079e262f..71bcab94c5c7 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
+       case CIPSO_V4_MAP_PASS:
+               return 0;
+       case CIPSO_V4_MAP_TRANS:
+-              if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
++              if ((level < doi_def->map.std->lvl.cipso_size) &&
++                  (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
+                       return 0;
+               break;
+       }
+@@ -1735,13 +1736,26 @@ validate_return:
+  */
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
++      unsigned char optbuf[sizeof(struct ip_options) + 40];
++      struct ip_options *opt = (struct ip_options *)optbuf;
++
+       if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+               return;
+ 
++      /*
++       * We might be called above the IP layer,
++       * so we can not use icmp_send and IPCB here.
++       */
++
++      memset(opt, 0, sizeof(struct ip_options));
++      opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
++      if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++              return;
++
+       if (gateway)
+-              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
++              __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
+       else
+-              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
++              __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
+ }
+ 
+ /**
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 31f17f0bbd1c..172d3dfed0c4 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -565,7 +565,8 @@ relookup_failed:
+  *                    MUST reply to only the first fragment.
+  */
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++               const struct ip_options *opt)
+ {
+       struct iphdr *iph;
+       int room;
+@@ -679,7 +680,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+                                         iph->tos;
+       mark = IP4_REPLY_MARK(net, skb_in->mark);
+ 
+-      if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
++      if (__ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in, opt))
+               goto out_unlock;
+ 
+ 
+@@ -731,7 +732,7 @@ out_free:
+       kfree(icmp_param);
+ out:;
+ }
+-EXPORT_SYMBOL(icmp_send);
++EXPORT_SYMBOL(__icmp_send);
+ 
+ 
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 4d158ff1def1..4cd3b5ad9cee 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -253,8 +253,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
+  * If opt == NULL, then skb->data should point to IP header.
+  */
+ 
+-int ip_options_compile(struct net *net,
+-                     struct ip_options *opt, struct sk_buff *skb)
++int __ip_options_compile(struct net *net,
++                       struct ip_options *opt, struct sk_buff *skb,
++                       __be32 *info)
+ {
+       __be32 spec_dst = htonl(INADDR_ANY);
+       unsigned char *pp_ptr = NULL;
+@@ -470,11 +471,22 @@ eol:
+               return 0;
+ 
+ error:
+-      if (skb) {
+-              icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
+-      }
++      if (info)
++              *info = htonl((pp_ptr-iph)<<24);
+       return -EINVAL;
+ }
++
++int ip_options_compile(struct net *net,
++                     struct ip_options *opt, struct sk_buff *skb)
++{
++      int ret;
++      __be32 info;
++
++      ret = __ip_options_compile(net, opt, skb, &info);
++      if (ret != 0 && skb)
++              icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
++      return ret;
++}
+ EXPORT_SYMBOL(ip_options_compile);
+ 
+ /*
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index cbff0d6ff1ac..270e79f4d40e 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -74,6 +74,33 @@ drop:
+       return 0;
+ }
+ 
++static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
++                   int encap_type)
++{
++      struct ip_tunnel *tunnel;
++      const struct iphdr *iph = ip_hdr(skb);
++      struct net *net = dev_net(skb->dev);
++      struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
++
++      tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
++                                iph->saddr, iph->daddr, 0);
++      if (tunnel) {
++              if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
++                      goto drop;
++
++              XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
++
++              skb->dev = tunnel->dev;
++
++              return xfrm_input(skb, nexthdr, spi, encap_type);
++      }
++
++      return -EINVAL;
++drop:
++      kfree_skb(skb);
++      return 0;
++}
++
+ static int vti_rcv(struct sk_buff *skb)
+ {
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
+       return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+ }
+ 
++static int vti_rcv_ipip(struct sk_buff *skb)
++{
++      XFRM_SPI_SKB_CB(skb)->family = AF_INET;
++      XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
++
++      return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
++}
++
+ static int vti_rcv_cb(struct sk_buff *skb, int err)
+ {
+       unsigned short family;
+@@ -439,6 +474,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
+       .priority       =       100,
+ };
+ 
++static struct xfrm_tunnel ipip_handler __read_mostly = {
++      .handler        =       vti_rcv_ipip,
++      .err_handler    =       vti4_err,
++      .priority       =       0,
++};
++
+ static int __net_init vti_init_net(struct net *net)
+ {
+       int err;
+@@ -622,6 +663,13 @@ static int __init vti_init(void)
+       if (err < 0)
+               goto xfrm_proto_comp_failed;
+ 
++      msg = "ipip tunnel";
++      err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
++      if (err < 0) {
++              pr_info("%s: cant't register tunnel\n",__func__);
++              goto xfrm_tunnel_failed;
++      }
++
+       msg = "netlink interface";
+       err = rtnl_link_register(&vti_link_ops);
+       if (err < 0)
+@@ -631,6 +679,8 @@ static int __init vti_init(void)
+ 
+ rtnl_link_failed:
+       xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
++xfrm_tunnel_failed:
++      xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+ xfrm_proto_comp_failed:
+       xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm_proto_ah_failed:
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index ad597b4b22a0..41f67629ae59 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1992,10 +1992,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 
+ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-      __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                      IPSTATS_MIB_OUTFORWDATAGRAMS);
+-      __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                      IPSTATS_MIB_OUTOCTETS, skb->len);
++      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
++                    IPSTATS_MIB_OUTFORWDATAGRAMS);
++      IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
++                    IPSTATS_MIB_OUTOCTETS, skb->len);
+       return dst_output(net, sk, skb);
+ }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 4381ea53fa91..75de3dd8b862 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1851,6 +1851,7 @@ static int __net_init sit_init_net(struct net *net)
+ 
+ err_reg_dev:
+       ipip6_dev_free(sitn->fb_tunnel_dev);
++      free_netdev(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+       return err;
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 8382b7880b24..8037b25ddb76 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2258,6 +2258,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
+                 u->tcp_fin_timeout,
+                 u->udp_timeout);
+ 
++#ifdef CONFIG_IP_VS_PROTO_TCP
++      if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
++          u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
++              return -EINVAL;
++      }
++#endif
++
++#ifdef CONFIG_IP_VS_PROTO_UDP
++      if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
++              return -EINVAL;
++#endif
++
+ #ifdef CONFIG_IP_VS_PROTO_TCP
+       if (u->tcp_timeout) {
+               pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 19b3f4fbea52..df1d5618b008 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -855,6 +855,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+               }
+ 
+               if (nf_ct_key_equal(h, tuple, zone, net)) {
++                      /* Tuple is taken already, so caller will need to find
++                       * a new source port to use.
++                       *
++                       * Only exception:
++                       * If the *original tuples* are identical, then both
++                       * conntracks refer to the same flow.
++                       * This is a rare situation, it can occur e.g. when
++                       * more than one UDP packet is sent from same socket
++                       * in different threads.
++                       *
++                       * Let nf_ct_resolve_clash() deal with this later.
++                       */
++                      if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
++                                            &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
++                              continue;
++
+                       NF_CT_STAT_INC_ATOMIC(net, found);
+                       rcu_read_unlock();
+                       return 1;
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 28c56b95fb7f..cb9d1d1210cb 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
+                   (state == 0 && (byte & bitmask) == 0))
+                       return bit_spot;
+ 
+-              bit_spot++;
++              if (++bit_spot >= bitmap_len)
++                      return -1;
+               bitmask >>= 1;
+               if (bitmask == 0) {
+                       byte = bitmap[++byte_offset];
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index 04759a0c3273..6ba829f2df91 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+                                                     sock->service_name,
+                                                     sock->service_name_len,
+                                                     &service_name_tlv_length);
++              if (!service_name_tlv) {
++                      err = -ENOMEM;
++                      goto error_tlv;
++              }
+               size += service_name_tlv_length;
+       }
+ 
+@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+                                     &miux_tlv_length);
++      if (!miux_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += miux_tlv_length;
+ 
+       rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++      if (!rw_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += rw_tlv_length;
+ 
+       pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+                                     &miux_tlv_length);
++      if (!miux_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += miux_tlv_length;
+ 
+       rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++      if (!rw_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += rw_tlv_length;
+ 
+       skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index e69786c6804c..a121d796fa51 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
+ 
+ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+ {
+-      u8 *gb_cur, *version_tlv, version, version_length;
+-      u8 *lto_tlv, lto_length;
+-      u8 *wks_tlv, wks_length;
+-      u8 *miux_tlv, miux_length;
++      u8 *gb_cur, version, version_length;
++      u8 lto_length, wks_length, miux_length;
++      u8 *version_tlv = NULL, *lto_tlv = NULL,
++         *wks_tlv = NULL, *miux_tlv = NULL;
+       __be16 wks = cpu_to_be16(local->local_wks);
+       u8 gb_len = 0;
+       int ret = 0;
+@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+       version = LLCP_VERSION_11;
+       version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+                                        1, &version_length);
++      if (!version_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += version_length;
+ 
+       lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
++      if (!lto_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += lto_length;
+ 
+       pr_debug("Local wks 0x%lx\n", local->local_wks);
+       wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
++      if (!wks_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += wks_length;
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+                                     &miux_length);
++      if (!miux_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += miux_length;
+ 
+       gb_len += ARRAY_SIZE(llcp_magic);
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 2e417c907a28..e9812e21dbc9 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -441,6 +441,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+       int nb = 0;
+       int count = 1;
+       int rc = NET_XMIT_SUCCESS;
++      int rc_drop = NET_XMIT_DROP;
+ 
+       /* Do not fool qdisc_drop_all() */
+       skb->prev = NULL;
+@@ -480,6 +481,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               q->duplicate = 0;
+               rootq->enqueue(skb2, rootq, to_free);
+               q->duplicate = dupsave;
++              rc_drop = NET_XMIT_SUCCESS;
+       }
+ 
+       /*
+@@ -492,7 +494,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch, to_free);
+                       if (!segs)
+-                              return NET_XMIT_DROP;
++                              return rc_drop;
+               } else {
+                       segs = skb;
+               }
+@@ -515,8 +517,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+                       1<<(prandom_u32() % 8);
+       }
+ 
+-      if (unlikely(sch->q.qlen >= sch->limit))
+-              return qdisc_drop_all(skb, sch, to_free);
++      if (unlikely(sch->q.qlen >= sch->limit)) {
++              qdisc_drop_all(skb, sch, to_free);
++              return rc_drop;
++      }
+ 
+       qdisc_qstats_backlog_inc(sch, skb);
+ 
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 936d7eee62d0..f66a6010ae07 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -71,6 +71,9 @@ static u32 virtio_transport_get_local_cid(void)
+ {
+       struct virtio_vsock *vsock = virtio_vsock_get();
+ 
++      if (!vsock)
++              return VMADDR_CID_ANY;
++
+       return vsock->guest_cid;
+ }
+ 
+@@ -495,10 +498,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 
+       virtio_vsock_update_guest_cid(vsock);
+ 
+-      ret = vsock_core_init(&virtio_transport.transport);
+-      if (ret < 0)
+-              goto out_vqs;
+-
+       vsock->rx_buf_nr = 0;
+       vsock->rx_buf_max_nr = 0;
+       atomic_set(&vsock->queued_replies, 0);
+@@ -526,8 +525,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+       mutex_unlock(&the_virtio_vsock_mutex);
+       return 0;
+ 
+-out_vqs:
+-      vsock->vdev->config->del_vqs(vsock->vdev);
+ out:
+       kfree(vsock);
+       mutex_unlock(&the_virtio_vsock_mutex);
+@@ -544,6 +541,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+       flush_work(&vsock->event_work);
+       flush_work(&vsock->send_pkt_work);
+ 
++      /* Reset all connected sockets when the device disappears */
++      vsock_for_each_connected_socket(virtio_vsock_reset_sock);
++
+       vdev->config->reset(vdev);
+ 
+       mutex_lock(&vsock->rx_lock);
+@@ -567,7 +567,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+ 
+       mutex_lock(&the_virtio_vsock_mutex);
+       the_virtio_vsock = NULL;
+-      vsock_core_exit();
+       mutex_unlock(&the_virtio_vsock_mutex);
+ 
+       vdev->config->del_vqs(vdev);
+@@ -600,14 +599,28 @@ static int __init virtio_vsock_init(void)
+       virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+       if (!virtio_vsock_workqueue)
+               return -ENOMEM;
++
+       ret = register_virtio_driver(&virtio_vsock_driver);
+       if (ret)
+-              destroy_workqueue(virtio_vsock_workqueue);
++              goto out_wq;
++
++      ret = vsock_core_init(&virtio_transport.transport);
++      if (ret)
++              goto out_vdr;
++
++      return 0;
++
++out_vdr:
++      unregister_virtio_driver(&virtio_vsock_driver);
++out_wq:
++      destroy_workqueue(virtio_vsock_workqueue);
+       return ret;
++
+ }
+ 
+ static void __exit virtio_vsock_exit(void)
+ {
++      vsock_core_exit();
+       unregister_virtio_driver(&virtio_vsock_driver);
+       destroy_workqueue(virtio_vsock_workqueue);
+ }
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index 2c0b52264a46..a625cb1500f9 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -129,7 +129,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
+       if (!cpu_list)
+               return cpu_map__read_all_cpu_map();
+ 
+-      if (!isdigit(*cpu_list))
++      /*
++       * must handle the case of empty cpumap to cover
++       * TOPOLOGY header for NUMA nodes with no CPU
++       * ( e.g., because of CPU hotplug)
++       */
++      if (!isdigit(*cpu_list) && *cpu_list != '\0')
+               goto out;
+ 
+       while (isdigit(*cpu_list)) {
+@@ -176,8 +181,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
+ 
+       if (nr_cpus > 0)
+               cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+-      else
++      else if (*cpu_list != '\0')
+               cpus = cpu_map__default_new();
++      else
++              cpus = cpu_map__dummy_new();
+ invalid:
+       free(tmp_cpus);
+ out:
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index adbc6c02c3aa..20ba5a9aeae4 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -85,6 +85,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+       return GELF_ST_TYPE(sym->st_info);
+ }
+ 
++static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
++{
++      return GELF_ST_VISIBILITY(sym->st_other);
++}
++
+ #ifndef STT_GNU_IFUNC
+ #define STT_GNU_IFUNC 10
+ #endif
+@@ -109,7 +114,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
+       return elf_sym__type(sym) == STT_NOTYPE &&
+               sym->st_name != 0 &&
+               sym->st_shndx != SHN_UNDEF &&
+-              sym->st_shndx != SHN_ABS;
++              sym->st_shndx != SHN_ABS &&
++              elf_sym__visibility(sym) != STV_HIDDEN &&
++              elf_sym__visibility(sym) != STV_INTERNAL;
+ }
+ 
+ static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 47ed6cef93fb..c9ff2b47bd1c 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for netfilter selftests
+ 
+-TEST_PROGS := nft_trans_stress.sh
++TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
+index 1017313e41a8..59caa8f71cd8 100644
+--- a/tools/testing/selftests/netfilter/config
++++ b/tools/testing/selftests/netfilter/config
+@@ -1,2 +1,2 @@
+ CONFIG_NET_NS=y
+-NF_TABLES_INET=y
++CONFIG_NF_TABLES_INET=y
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+new file mode 100755
+index 000000000000..8ec76681605c
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -0,0 +1,762 @@
++#!/bin/bash
++#
++# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
++#
++
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++ret=0
++
++nft --version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++      echo "SKIP: Could not run test without nft tool"
++      exit $ksft_skip
++fi
++
++ip -Version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++      echo "SKIP: Could not run test without ip tool"
++      exit $ksft_skip
++fi
++
++ip netns add ns0
++ip netns add ns1
++ip netns add ns2
++
++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
++ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
++
++ip -net ns0 link set lo up
++ip -net ns0 link set veth0 up
++ip -net ns0 addr add 10.0.1.1/24 dev veth0
++ip -net ns0 addr add dead:1::1/64 dev veth0
++
++ip -net ns0 link set veth1 up
++ip -net ns0 addr add 10.0.2.1/24 dev veth1
++ip -net ns0 addr add dead:2::1/64 dev veth1
++
++for i in 1 2; do
++  ip -net ns$i link set lo up
++  ip -net ns$i link set eth0 up
++  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
++  ip -net ns$i route add default via 10.0.$i.1
++  ip -net ns$i addr add dead:$i::99/64 dev eth0
++  ip -net ns$i route add default via dead:$i::1
++done
++
++bad_counter()
++{
++      local ns=$1
++      local counter=$2
++      local expect=$3
++
++      echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
++      ip netns exec $ns nft list counter inet filter $counter 1>&2
++}
++
++check_counters()
++{
++      ns=$1
++      local lret=0
++
++      cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
++      if [ $? -ne 0 ]; then
++              bad_counter $ns ns0in "packets 1 bytes 84"
++              lret=1
++      fi
++      cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
++      if [ $? -ne 0 ]; then
++              bad_counter $ns ns0out "packets 1 bytes 84"
++              lret=1
++      fi
++
++      expect="packets 1 bytes 104"
++      cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
++      if [ $? -ne 0 ]; then
++              bad_counter $ns ns0in6 "$expect"
++              lret=1
++      fi
++      cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
++      if [ $? -ne 0 ]; then
++              bad_counter $ns ns0out6 "$expect"
++              lret=1
++      fi
++
++      return $lret
++}
++
++check_ns0_counters()
++{
++      local ns=$1
++      local lret=0
++
++      cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
++      if [ $? -ne 0 ]; then
++              bad_counter ns0 ns0in "packets 0 bytes 0"
++              lret=1
++      fi
++
++      cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
++      if [ $? -ne 0 ]; then
++              bad_counter ns0 ns0in6 "packets 0 bytes 0"
++              lret=1
++      fi
++
++      cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
++      if [ $? -ne 0 ]; then
++              bad_counter ns0 ns0out "packets 0 bytes 0"
++              lret=1
++      fi
++      cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
++      if [ $? -ne 0 ]; then
++              bad_counter ns0 ns0out6 "packets 0 bytes 0"
++              lret=1
++      fi
++
++      for dir in "in" "out" ; do
++              expect="packets 1 bytes 84"
++              cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 $ns$dir "$expect"
++                      lret=1
++              fi
++
++              expect="packets 1 bytes 104"
++              cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 $ns$dir6 "$expect"
++                      lret=1
++              fi
++      done
++
++      return $lret
++}
++
++reset_counters()
++{
++      for i in 0 1 2;do
++              ip netns exec ns$i nft reset counters inet > /dev/null
++      done
++}
++
++test_local_dnat6()
++{
++      local lret=0
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++      chain output {
++              type nat hook output priority 0; policy accept;
++              ip6 daddr dead:1::99 dnat to dead:2::99
++      }
++}
++EOF
++      if [ $? -ne 0 ]; then
++              echo "SKIP: Could not add ip6 dnat hook"
++              return $ksft_skip
++      fi
++
++      # ping netns1, expect rewrite to netns2
++      ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
++      if [ $? -ne 0 ]; then
++              lret=1
++              echo "ERROR: ping6 failed"
++              return $lret
++      fi
++
++      expect="packets 0 bytes 0"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns2$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 0 count in ns1
++      expect="packets 0 bytes 0"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 1 packet in ns2
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
++      ip netns exec ns0 nft flush chain ip6 nat output
++
++      return $lret
++}
++
++test_local_dnat()
++{
++      local lret=0
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++      chain output {
++              type nat hook output priority 0; policy accept;
++              ip daddr 10.0.1.99 dnat to 10.0.2.99
++      }
++}
++EOF
++      # ping netns1, expect rewrite to netns2
++      ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
++      if [ $? -ne 0 ]; then
++              lret=1
++              echo "ERROR: ping failed"
++              return $lret
++      fi
++
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns2$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 0 count in ns1
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 1 packet in ns2
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
++
++      ip netns exec ns0 nft flush chain ip nat output
++
++      reset_counters
++      ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
++      if [ $? -ne 0 ]; then
++              lret=1
++              echo "ERROR: ping failed"
++              return $lret
++      fi
++
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns2$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 1 count in ns1
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns0 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # expect 0 packet in ns2
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns2$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
++
++      return $lret
++}
++
++
++test_masquerade6()
++{
++      local lret=0
++
++      ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
++
++      ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 via ipv6"
++              return 1
++              lret=1
++      fi
++
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns2$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      reset_counters
++
++# add masquerading rule
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++      chain postrouting {
++              type nat hook postrouting priority 0; policy accept;
++              meta oif veth0 masquerade
++      }
++}
++EOF
++      ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
++              lret=1
++      fi
++
++      # ns1 should have seen packets from ns0, due to masquerade
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # ns1 should not have seen packets from ns2, due to masquerade
++      expect="packets 0 bytes 0"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      ip netns exec ns0 nft flush chain ip6 nat postrouting
++      if [ $? -ne 0 ]; then
++              echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
++              lret=1
++      fi
++
++      test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
++
++      return $lret
++}
++
++test_masquerade()
++{
++      local lret=0
++
++      ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
++      ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
++
++      ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2"
++              lret=1
++      fi
++
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns2$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      reset_counters
++
++# add masquerading rule
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++      chain postrouting {
++              type nat hook postrouting priority 0; policy accept;
++              meta oif veth0 masquerade
++      }
++}
++EOF
++      ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
++              lret=1
++      fi
++
++      # ns1 should have seen packets from ns0, due to masquerade
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # ns1 should not have seen packets from ns2, due to masquerade
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      ip netns exec ns0 nft flush chain ip nat postrouting
++      if [ $? -ne 0 ]; then
++              echo "ERROR: Could not flush nat postrouting" 1>&2
++              lret=1
++      fi
++
++      test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
++
++      return $lret
++}
++
++test_redirect6()
++{
++      local lret=0
++
++      ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
++
++      ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 via ipv6"
++              lret=1
++      fi
++
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns2$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      reset_counters
++
++# add redirect rule
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++      chain prerouting {
++              type nat hook prerouting priority 0; policy accept;
++              meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
++      }
++}
++EOF
++      ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
++              lret=1
++      fi
++
++      # ns1 should have seen no packets from ns2, due to redirection
++      expect="packets 0 bytes 0"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # ns0 should have seen packets from ns2, due to masquerade
++      expect="packets 1 bytes 104"
++      for dir in "in6" "out6" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      ip netns exec ns0 nft delete table ip6 nat
++      if [ $? -ne 0 ]; then
++              echo "ERROR: Could not delete ip6 nat table" 1>&2
++              lret=1
++      fi
++
++      test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
++
++      return $lret
++}
++
++test_redirect()
++{
++      local lret=0
++
++      ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
++      ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
++
++      ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2"
++              lret=1
++      fi
++
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns2$dir "$expect"
++                      lret=1
++              fi
++
++              cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns2 ns1$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      reset_counters
++
++# add redirect rule
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++      chain prerouting {
++              type nat hook prerouting priority 0; policy accept;
++              meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
++      }
++}
++EOF
++      ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++      if [ $? -ne 0 ] ; then
++              echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
++              lret=1
++      fi
++
++      # ns1 should have seen no packets from ns2, due to redirection
++      expect="packets 0 bytes 0"
++      for dir in "in" "out" ; do
++
++              cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      # ns0 should have seen packets from ns2, due to masquerade
++      expect="packets 1 bytes 84"
++      for dir in "in" "out" ; do
++              cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++              if [ $? -ne 0 ]; then
++                      bad_counter ns1 ns0$dir "$expect"
++                      lret=1
++              fi
++      done
++
++      ip netns exec ns0 nft delete table ip nat
++      if [ $? -ne 0 ]; then
++              echo "ERROR: Could not delete nat table" 1>&2
++              lret=1
++      fi
++
++      test $lret -eq 0 && echo "PASS: IP redirection for ns2"
++
++      return $lret
++}
++
++
++# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
++for i in 0 1 2; do
++ip netns exec ns$i nft -f - <<EOF
++table inet filter {
++      counter ns0in {}
++      counter ns1in {}
++      counter ns2in {}
++
++      counter ns0out {}
++      counter ns1out {}
++      counter ns2out {}
++
++      counter ns0in6 {}
++      counter ns1in6 {}
++      counter ns2in6 {}
++
++      counter ns0out6 {}
++      counter ns1out6 {}
++      counter ns2out6 {}
++
++      map nsincounter {
++              type ipv4_addr : counter
++              elements = { 10.0.1.1 : "ns0in",
++                           10.0.2.1 : "ns0in",
++                           10.0.1.99 : "ns1in",
++                           10.0.2.99 : "ns2in" }
++      }
++
++      map nsincounter6 {
++              type ipv6_addr : counter
++              elements = { dead:1::1 : "ns0in6",
++                           dead:2::1 : "ns0in6",
++                           dead:1::99 : "ns1in6",
++                           dead:2::99 : "ns2in6" }
++      }
++
++      map nsoutcounter {
++              type ipv4_addr : counter
++              elements = { 10.0.1.1 : "ns0out",
++                           10.0.2.1 : "ns0out",
++                           10.0.1.99: "ns1out",
++                           10.0.2.99: "ns2out" }
++      }
++
++      map nsoutcounter6 {
++              type ipv6_addr : counter
++              elements = { dead:1::1 : "ns0out6",
++                           dead:2::1 : "ns0out6",
++                           dead:1::99 : "ns1out6",
++                           dead:2::99 : "ns2out6" }
++      }
++
++      chain input {
++              type filter hook input priority 0; policy accept;
++              counter name ip saddr map @nsincounter
++              icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
++      }
++      chain output {
++              type filter hook output priority 0; policy accept;
++              counter name ip daddr map @nsoutcounter
++              icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
++      }
++}
++EOF
++done
++
++sleep 3
++# test basic connectivity
++for i in 1 2; do
++  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
++  if [ $? -ne 0 ];then
++      echo "ERROR: Could not reach other namespace(s)" 1>&2
++      ret=1
++  fi
++
++  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
++  if [ $? -ne 0 ];then
++      echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
++      ret=1
++  fi
++  check_counters ns$i
++  if [ $? -ne 0 ]; then
++      ret=1
++  fi
++
++  check_ns0_counters ns$i
++  if [ $? -ne 0 ]; then
++      ret=1
++  fi
++  reset_counters
++done
++
++if [ $ret -eq 0 ];then
++      echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
++fi
++
++reset_counters
++test_local_dnat
++test_local_dnat6
++
++reset_counters
++test_masquerade
++test_masquerade6
++
++reset_counters
++test_redirect
++test_redirect6
++
++for i in 0 1 2; do ip netns del ns$i;done
++
++exit $ret
