commit:     9fd97c132540e56fdedd554fa601f0bb905da89a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 21 14:40:03 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9fd97c13

Linux patch 4.14.134

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1133_linux-4.14.134.patch | 3972 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3976 insertions(+)

diff --git a/0000_README b/0000_README
index dd56b3e..befc228 100644
--- a/0000_README
+++ b/0000_README
@@ -575,6 +575,10 @@ Patch:  1132_linux-4.14.133.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.133
 
+Patch:  1133_linux-4.14.134.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.134
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1133_linux-4.14.134.patch b/1133_linux-4.14.134.patch
new file mode 100644
index 0000000..3ba280f
--- /dev/null
+++ b/1133_linux-4.14.134.patch
@@ -0,0 +1,3972 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi
+index 7122d6264c49..c310db4ccbc2 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-qmi
++++ b/Documentation/ABI/testing/sysfs-class-net-qmi
+@@ -29,7 +29,7 @@ Contact:     Bjørn Mork <bj...@mork.no>
+ Description:
+               Unsigned integer.
+ 
+-              Write a number ranging from 1 to 127 to add a qmap mux
++              Write a number ranging from 1 to 254 to add a qmap mux
+               based network device, supported by recent Qualcomm based
+               modems.
+ 
+@@ -46,5 +46,5 @@ Contact:     Bjørn Mork <bj...@mork.no>
+ Description:
+               Unsigned integer.
+ 
+-              Write a number ranging from 1 to 127 to delete a previously
++              Write a number ranging from 1 to 254 to delete a previously
+               created qmap mux based network device.
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index ffc064c1ec68..49311f3da6f2 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -9,5 +9,6 @@ are configurable at compile, boot or run time.
+ .. toctree::
+    :maxdepth: 1
+ 
++   spectre
+    l1tf
+    mds
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+new file mode 100644
+index 000000000000..25f3b2532198
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -0,0 +1,697 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Spectre Side Channels
++=====================
++
++Spectre is a class of side channel attacks that exploit branch prediction
++and speculative execution on modern CPUs to read memory, possibly
++bypassing access controls. Speculative execution side channel exploits
++do not modify memory but attempt to infer privileged data in memory.
++
++This document covers Spectre variant 1 and Spectre variant 2.
++
++Affected processors
++-------------------
++
++Speculative execution side channel methods affect a wide range of modern
++high performance processors, since most modern high speed processors
++use branch prediction and speculative execution.
++
++The following CPUs are vulnerable:
++
++    - Intel Core, Atom, Pentium, and Xeon processors
++
++    - AMD Phenom, EPYC, and Zen processors
++
++    - IBM POWER and zSeries processors
++
++    - Higher end ARM processors
++
++    - Apple CPUs
++
++    - Higher end MIPS CPUs
++
++    - Likely most other high performance CPUs. Contact your CPU vendor for details.
++
++Whether a processor is affected or not can be read out from the Spectre
++vulnerability files in sysfs. See :ref:`spectre_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries describe Spectre variants:
++
++   =============   =======================  =================
++   CVE-2017-5753   Bounds check bypass      Spectre variant 1
++   CVE-2017-5715   Branch target injection  Spectre variant 2
++   =============   =======================  =================
++
++Problem
++-------
++
++CPUs use speculative operations to improve performance. That may leave
++traces of memory accesses or computations in the processor's caches,
++buffers, and branch predictors. Malicious software may be able to
++influence the speculative execution paths, and then use the side effects
++of the speculative execution in the CPUs' caches and buffers to infer
++privileged data touched during the speculative execution.
++
++Spectre variant 1 attacks take advantage of speculative execution of
++conditional branches, while Spectre variant 2 attacks use speculative
++execution of indirect branches to leak privileged memory.
++See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
++:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
++
++Spectre variant 1 (Bounds Check Bypass)
++---------------------------------------
++
++The bounds check bypass attack :ref:`[2] <spec_ref2>` takes advantage
++of speculative execution that bypasses conditional branch instructions
++used for memory access bounds checks (e.g. checking if the index of an
++array results in a memory access within a valid range). This results in
++memory accesses to invalid memory (with an out-of-bounds index) that are
++done speculatively before validation checks resolve. Such speculative
++memory accesses can leave side effects, creating side channels which
++leak information to the attacker.
++
++There are some extensions of Spectre variant 1 attacks for reading data
++over the network, see :ref:`[12] <spec_ref12>`. However, such attacks
++are difficult, low bandwidth, fragile, and are considered low risk.
++
++Spectre variant 2 (Branch Target Injection)
++-------------------------------------------
++
++The branch target injection attack takes advantage of speculative
++execution of indirect branches :ref:`[3] <spec_ref3>`.  The indirect
++branch predictors inside the processor used to guess the target of
++indirect branches can be influenced by an attacker, causing gadget code
++to be speculatively executed, thus exposing sensitive data touched by
++the victim. The side effects left in the CPU's caches during speculative
++execution can be measured to infer data values.
++
++.. _poison_btb:
++
++In Spectre variant 2 attacks, the attacker can steer speculative indirect
++branches in the victim to gadget code by poisoning the branch target
++buffer of a CPU used for predicting indirect branch addresses. Such
++poisoning could be done by indirect branching into existing code,
++with the address offset of the indirect branch under the attacker's
++control. Since the branch prediction on impacted hardware does not
++fully disambiguate branch address and uses the offset for prediction,
++this could cause privileged code's indirect branch to jump to gadget
++code with the same offset.
++
++The most useful gadgets take an attacker-controlled input parameter (such
++as a register value) so that the memory read can be controlled. Gadgets
++without input parameters might be possible, but the attacker would have
++very little control over what memory can be read, reducing the risk of
++the attack revealing useful data.
++
++One other variant 2 attack vector is for the attacker to poison the
++return stack buffer (RSB) :ref:`[13] <spec_ref13>` to cause speculative
++subroutine return instruction execution to go to a gadget.  An attacker's
++imbalanced subroutine call instructions might "poison" entries in the
++return stack buffer which are later consumed by a victim's subroutine
++return instructions.  This attack can be mitigated by flushing the return
++stack buffer on context switch, or virtual machine (VM) exit.
++
++On systems with simultaneous multi-threading (SMT), attacks are possible
++from the sibling thread, as level 1 cache and branch target buffer
++(BTB) may be shared between hardware threads in a CPU core.  A malicious
++program running on the sibling thread may influence its peer's BTB to
++steer its indirect branch speculations to gadget code, and measure the
++speculative execution's side effects left in level 1 cache to infer the
++victim's data.
++
++Attack scenarios
++----------------
++
++The following list of attack scenarios has been anticipated, but may
++not cover all possible attack vectors.
++
++1. A user process attacking the kernel
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   The attacker passes a parameter to the kernel via a register or
++   via a known address in memory during a syscall. Such a parameter may
++   be used later by the kernel as an index to an array or to derive
++   a pointer for a Spectre variant 1 attack.  The index or pointer
++   is invalid, but bounds checks are bypassed in the code branch taken
++   for speculative execution. This could cause privileged memory to be
++   accessed and leaked.
++
++   For kernel code where data pointers have been identified as
++   potentially attacker-influenced, new "nospec" accessor macros are
++   used to prevent speculative loading of data.
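++
++   As a hedged illustration (not a verbatim kernel excerpt; real call
++   sites vary), a bounds check hardened with the nospec accessor macro
++   might look like::
++
++      #include <linux/nospec.h>
++
++      if (index < size) {
++              /* Clamp index under speculation so the CPU cannot
++               * speculatively load array[index] out of bounds. */
++              index = array_index_nospec(index, size);
++              val = array[index];
++      }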
++
++   A Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
++   target buffer (BTB) before issuing a syscall to launch an attack.
++   After entering the kernel, the kernel could use the poisoned branch
++   target buffer on indirect jump and jump to gadget code in speculative
++   execution.
++
++   If an attacker tries to control the memory addresses leaked during
++   speculative execution, they would also need to pass a parameter to
++   the gadget, either through a register or a known address in memory.
++   After the gadget has executed, they can measure the side effect.
++
++   The kernel can protect itself against consuming poisoned branch
++   target buffer entries by using return trampolines (also known as
++   "retpoline") :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` for all
++   indirect branches. Return trampolines trap speculative execution paths
++   to prevent jumping to gadget code during speculative execution.
++   x86 CPUs with Enhanced Indirect Branch Restricted Speculation
++   (Enhanced IBRS) available in hardware should use the feature to
++   mitigate Spectre variant 2 instead of retpoline. Enhanced IBRS is
++   more efficient than retpoline.
++
++   There may be gadget code in firmware which could be exploited with
++   Spectre variant 2 attack by a rogue user process. To mitigate such
++   attacks on x86, Indirect Branch Restricted Speculation (IBRS) feature
++   is turned on before the kernel invokes any firmware code.
++
++2. A user process attacking another user process
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   A malicious user process can try to attack another user process,
++   either via a context switch on the same hardware thread, or from the
++   sibling hyperthread sharing a physical processor core on simultaneous
++   multi-threading (SMT) system.
++
++   Spectre variant 1 attacks generally require passing parameters
++   between the processes, which needs a data passing relationship, such
++   as remote procedure calls (RPC).  Those parameters are used in gadget
++   code to derive invalid data pointers accessing privileged memory in
++   the attacked process.
++
++   Spectre variant 2 attacks can be launched from a rogue process by
++   :ref:`poisoning <poison_btb>` the branch target buffer.  This can
++   influence the indirect branch targets for a victim process that either
++   runs later on the same hardware thread, or runs concurrently on
++   a sibling hardware thread sharing the same physical core.
++
++   A user process can protect itself against Spectre variant 2 attacks
++   by using the prctl() syscall to disable indirect branch speculation
++   for itself.  An administrator can also cordon off an unsafe process
++   from polluting the branch target buffer by disabling the process's
++   indirect branch speculation. This comes with a performance cost
++   from not using indirect branch speculation and clearing the branch
++   target buffer.  When SMT is enabled on x86, for a process that has
++   indirect branch speculation disabled, Single Threaded Indirect Branch
++   Predictors (STIBP) :ref:`[4] <spec_ref4>` are turned on to prevent the
++   sibling thread from controlling branch target buffer.  In addition,
++   the Indirect Branch Prediction Barrier (IBPB) is issued to clear the
++   branch target buffer when context switching to and from such a process.
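++
++   A minimal user space sketch of that prctl() call (assuming the
++   PR_SPEC_* constants are available via <linux/prctl.h>)::
++
++      #include <stdio.h>
++      #include <sys/prctl.h>
++      #include <linux/prctl.h>
++
++      int main(void)
++      {
++              /* Opt this task out of indirect branch speculation; the
++               * state is inherited by children created after the call. */
++              if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
++                        PR_SPEC_DISABLE, 0, 0) != 0)
++                      perror("prctl(PR_SET_SPECULATION_CTRL)");
++              return 0;
++      }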
++
++   On x86, the return stack buffer is stuffed on context switch.
++   This prevents the branch target buffer from being used for branch
++   prediction when the return stack buffer underflows while switching to
++   a deeper call stack. Any poisoned entries in the return stack buffer
++   left by the previous process will also be cleared.
++
++   User programs should use address space randomization to make attacks
++   more difficult (Set /proc/sys/kernel/randomize_va_space = 1 or 2).
++
++3. A virtualized guest attacking the host
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   The attack mechanism is similar to how user processes attack the
++   kernel.  The kernel is entered via hyper-calls or other virtualization
++   exit paths.
++
++   For Spectre variant 1 attacks, rogue guests can pass parameters
++   (e.g. in registers) via hyper-calls to derive invalid pointers to
++   speculate into privileged memory after entering the kernel.  For places
++   where such kernel code has been identified, nospec accessor macros
++   are used to stop speculative memory access.
++
++   For Spectre variant 2 attacks, rogue guests can :ref:`poison
++   <poison_btb>` the branch target buffer or return stack buffer, causing
++   the kernel to jump to gadget code in the speculative execution paths.
++
++   To mitigate variant 2, the host kernel can use return trampolines
++   for indirect branches to bypass the poisoned branch target buffer,
++   and flush the return stack buffer on VM exit.  This prevents rogue
++   guests from affecting indirect branching in the host kernel.
++
++   To protect host processes from rogue guests, host processes can have
++   indirect branch speculation disabled via prctl().  The branch target
++   buffer is cleared before context switching to such processes.
++
++4. A virtualized guest attacking another guest
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   A rogue guest may attack another guest to get data accessible by the
++   other guest.
++
++   Spectre variant 1 attacks are possible if parameters can be passed
++   between guests.  This may be done via mechanisms such as shared memory
++   or message passing.  Such parameters could be used to derive data
++   pointers to privileged data in the guest.  The privileged data could be
++   accessed by gadget code in the victim's speculation paths.
++
++   Spectre variant 2 attacks can be launched from a rogue guest by
++   :ref:`poisoning <poison_btb>` the branch target buffer or the return
++   stack buffer. Such poisoned entries could be used to influence
++   speculative execution paths in the victim guest.
++
++   The Linux kernel mitigates attacks against other guests running in the same
++   CPU hardware thread by flushing the return stack buffer on VM exit,
++   and clearing the branch target buffer before switching to a new guest.
++
++   If SMT is used, Spectre variant 2 attacks from an untrusted guest
++   in the sibling hyperthread can be mitigated by the administrator,
++   by turning off the unsafe guest's indirect branch speculation via
++   prctl().  A guest can also protect itself by turning on microcode
++   based mitigations (such as IBPB or STIBP on x86) within the guest.
++
++.. _spectre_sys_info:
++
++Spectre system information
++--------------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current
++mitigation status of the system for Spectre: whether the system is
++vulnerable, and which mitigations are active.
++
++The sysfs file showing Spectre variant 1 mitigation status is:
++
++   /sys/devices/system/cpu/vulnerabilities/spectre_v1
++
++The possible values in this file are:
++
++  =========================================  ================================
++  'Mitigation: __user pointer sanitization'  Protection in kernel on a case
++                                             by case basis with explicit
++                                             pointer sanitization.
++  =========================================  ================================
++
++However, the protections are put in place on a case by case basis,
++and there is no guarantee that all possible attack vectors for Spectre
++variant 1 are covered.
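++
++As a hedged example of reading this interface from user space (plain C
++shown; any way of reading the file works)::
++
++   #include <stdio.h>
++
++   int main(void)
++   {
++           char buf[256];
++           FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");
++
++           if (f && fgets(buf, sizeof(buf), f))
++                   printf("spectre_v1: %s", buf);
++           if (f)
++                   fclose(f);
++           return 0;
++   }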
++
++The spectre_v2 kernel file reports if the kernel has been compiled with
++retpoline mitigation or if the CPU has hardware mitigation, and if the
++CPU has support for additional process-specific mitigation.
++
++This file also reports CPU features enabled by microcode to mitigate
++attacks between user processes:
++
++1. Indirect Branch Prediction Barrier (IBPB) to add additional
++   isolation between processes of different users.
++2. Single Thread Indirect Branch Predictors (STIBP) to add additional
++   isolation between CPU threads running on the same core.
++
++These CPU features may impact performance when used and can be enabled
++per process on a case-by-case basis.
++
++The sysfs file showing Spectre variant 2 mitigation status is:
++
++   /sys/devices/system/cpu/vulnerabilities/spectre_v2
++
++The possible values in this file are:
++
++  - Kernel status:
++
++  ====================================  =================================
++  'Not affected'                        The processor is not vulnerable
++  'Vulnerable'                          Vulnerable, no mitigation
++  'Mitigation: Full generic retpoline'  Software-focused mitigation
++  'Mitigation: Full AMD retpoline'      AMD-specific software mitigation
++  'Mitigation: Enhanced IBRS'           Hardware-focused mitigation
++  ====================================  =================================
++
++  - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
++    used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
++
++  ========== =============================================================
++  'IBRS_FW'  Protection against user program attacks when calling firmware
++  ========== =============================================================
++
++  - Indirect branch prediction barrier (IBPB) status for protection between
++    processes of different users. This feature can be controlled through
++    prctl() per process, or through kernel command line options. This is
++    an x86 only feature. For more details see below.
++
++  ===================   ========================================================
++  'IBPB: disabled'      IBPB unused
++  'IBPB: always-on'     Use IBPB on all tasks
++  'IBPB: conditional'   Use IBPB on SECCOMP or indirect branch restricted tasks
++  ===================   ========================================================
++
++  - Single threaded indirect branch prediction (STIBP) status for protection
++    between different hyper threads. This feature can be controlled through
++    prctl() per process, or through kernel command line options. This is
++    an x86-only feature. For more details see below.
++
++  ====================  ========================================================
++  'STIBP: disabled'     STIBP unused
++  'STIBP: forced'       Use STIBP on all tasks
++  'STIBP: conditional'  Use STIBP on SECCOMP or indirect branch restricted tasks
++  ====================  ========================================================
++
++  - Return stack buffer (RSB) protection status:
++
++  =============   ===========================================
++  'RSB filling'   Protection of RSB on context switch enabled
++  =============   ===========================================
++
++Full mitigation might require a microcode update from the CPU
++vendor. When the necessary microcode is not available, the kernel will
++report the vulnerability.
++
++Turning on mitigation for Spectre variant 1 and Spectre variant 2
++-----------------------------------------------------------------
++
++1. Kernel mitigation
++^^^^^^^^^^^^^^^^^^^^
++
++   For Spectre variant 1, vulnerable kernel code (as determined
++   by code audit or scanning tools) is annotated on a case by case
++   basis to use nospec accessor macros for bounds clipping :ref:`[2]
++   <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
++   not cover all attack vectors for Spectre variant 1.
++
++   For Spectre variant 2 mitigation, the compiler turns indirect calls or
++   jumps in the kernel into equivalent return trampolines (retpolines)
++   :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
++   addresses.  Speculative execution paths under retpolines are trapped
++   in an infinite loop to prevent any speculative execution jumping to
++   a gadget.
++
++   To turn on retpoline mitigation on a vulnerable CPU, the kernel
++   needs to be compiled with a gcc compiler that supports the
++   -mindirect-branch=thunk-extern -mindirect-branch-register options.
++   If the kernel is compiled with a Clang compiler, the compiler needs
++   to support -mretpoline-external-thunk option.  The kernel config
++   CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with
++   the latest updated microcode.
++
++   On Intel Skylake-era systems the mitigation covers most, but not all,
++   cases. See :ref:`[3] <spec_ref3>` for more details.
++
++   On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
++   IBRS on x86), retpoline is automatically disabled at run time.
++
++   The retpoline mitigation is turned on by default on vulnerable
++   CPUs. It can be forced on or off by the administrator
++   via the kernel command line and sysfs control files. See
++   :ref:`spectre_mitigation_control_command_line`.
++
++   On x86, indirect branch restricted speculation is turned on by default
++   before invoking any firmware code to prevent Spectre variant 2 exploits
++   using the firmware.
++
++   Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y
++   and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes
++   attacks on the kernel generally more difficult.
++
++2. User program mitigation
++^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   User programs can mitigate Spectre variant 1 using LFENCE or "bounds
++   clipping". For more details see :ref:`[2] <spec_ref2>`.
++
++   For Spectre variant 2 mitigation, individual user programs
++   can be compiled with return trampolines for indirect branches.
++   This protects them from consuming poisoned entries in the branch
++   target buffer left by malicious software.  Alternatively, the
++   programs can disable their indirect branch speculation via prctl()
++   (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++   On x86, this will turn on STIBP to guard against attacks from the
++   sibling thread when the user program is running, and use IBPB to
++   flush the branch target buffer when switching to/from the program.
++
++   Restricting indirect branch speculation on a user program will
++   also prevent the program from launching a variant 2 attack
++   on x86.  All sandboxed SECCOMP programs have indirect branch
++   speculation restricted by default.  Administrators can change
++   that behavior via the kernel command line and sysfs control files.
++   See :ref:`spectre_mitigation_control_command_line`.
++
++   Programs that disable their indirect branch speculation will have
++   more overhead and run slower.
++
++   User programs should use address space randomization
++   (/proc/sys/kernel/randomize_va_space = 1 or 2) to make attacks more
++   difficult.
++
++3. VM mitigation
++^^^^^^^^^^^^^^^^
++
++   Within the kernel, Spectre variant 1 attacks from rogue guests are
++   mitigated on a case by case basis in VM exit paths. Vulnerable code
++   uses nospec accessor macros for "bounds clipping", to avoid any
++   usable disclosure gadgets.  However, this may not cover all variant
++   1 attack vectors.
++
++   For Spectre variant 2 attacks from rogue guests to the kernel, the
++   Linux kernel uses retpoline or Enhanced IBRS to prevent consumption of
++   poisoned entries in branch target buffer left by rogue guests.  It also
++   flushes the return stack buffer on every VM exit to prevent a return
++   stack buffer underflow (which could otherwise cause the poisoned branch
++   target buffer to be used) and to clear poisoned entries that attacker
++   guests may have left in the return stack buffer.
++
++   To mitigate guest-to-guest attacks in the same CPU hardware thread,
++   the branch target buffer is sanitized by flushing before switching
++   to a new guest on a CPU.
++
++   The above mitigations are turned on by default on vulnerable CPUs.
++
++   To mitigate guest-to-guest attacks from sibling thread when SMT is
++   in use, an untrusted guest running in the sibling thread can have
++   its indirect branch speculation disabled by the administrator via prctl().
++
++   The kernel also allows guests to use any microcode based mitigation
++   they choose to use (such as IBPB or STIBP on x86) to protect themselves.
++
++.. _spectre_mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++Spectre variant 2 mitigation can be disabled or force enabled at the
++kernel command line.
++
++      nospectre_v2
++
++              [X86] Disable all mitigations for the Spectre variant 2
++              (indirect branch prediction) vulnerability. System may
++              allow data leaks with this option, which is equivalent
++              to spectre_v2=off.
++
++
++        spectre_v2=
++
++              [X86] Control mitigation of Spectre variant 2
++              (indirect branch speculation) vulnerability.
++              The default operation protects the kernel from
++              user space attacks.
++
++              on
++                      unconditionally enable, implies
++                      spectre_v2_user=on
++              off
++                      unconditionally disable, implies
++                      spectre_v2_user=off
++              auto
++                      kernel detects whether your CPU model is
++                      vulnerable
++
++              Selecting 'on' will, and 'auto' may, choose a
++              mitigation method at run time according to the
++              CPU, the available microcode, the setting of the
++              CONFIG_RETPOLINE configuration option, and the
++              compiler with which the kernel was built.
++
++              Selecting 'on' will also enable the mitigation
++              against user space to user space task attacks.
++
++              Selecting 'off' will disable both the kernel and
++              the user space protections.
++
++              Specific mitigations can also be selected manually:
++
++              retpoline
++                                      replace indirect branches
++              retpoline,generic
++                                      Google's original retpoline
++              retpoline,amd
++                                      AMD-specific minimal thunk
++
++              Not specifying this option is equivalent to
++              spectre_v2=auto.
++
++For user space mitigation:
++
++        spectre_v2_user=
++
++              [X86] Control mitigation of Spectre variant 2
++              (indirect branch speculation) vulnerability between
++              user space tasks
++
++              on
++                      Unconditionally enable mitigations. Is
++                      enforced by spectre_v2=on
++
++              off
++                      Unconditionally disable mitigations. Is
++                      enforced by spectre_v2=off
++
++              prctl
++                      Indirect branch speculation is enabled,
++                      but mitigation can be enabled via prctl
++                      per thread. The mitigation control state
++                      is inherited on fork.
++
++              prctl,ibpb
++                      Like "prctl" above, but only STIBP is
++                      controlled per thread. IBPB is issued
++                      always when switching between different user
++                      space processes.
++
++              seccomp
++                      Same as "prctl" above, but all seccomp
++                      threads will enable the mitigation unless
++                      they explicitly opt out.
++
++              seccomp,ibpb
++                      Like "seccomp" above, but only STIBP is
++                      controlled per thread. IBPB is issued
++                      always when switching between different
++                      user space processes.
++
++              auto
++                      Kernel selects the mitigation depending on
++                      the available CPU features and vulnerability.
++
++              Default mitigation:
++              If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
++
++              Not specifying this option is equivalent to
++              spectre_v2_user=auto.
++
++              In general the kernel by default selects
++              reasonable mitigations for the current CPU. To
++              disable Spectre variant 2 mitigations, boot with
++              spectre_v2=off. Spectre variant 1 mitigations
++              cannot be disabled.
++
++Mitigation selection guide
++--------------------------
++
++1. Trusted userspace
++^^^^^^^^^^^^^^^^^^^^
++
++   If all userspace applications are from trusted sources and do not
++   execute externally supplied untrusted code, then the mitigations can
++   be disabled.
++
++2. Protect sensitive programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   For security-sensitive programs that have secrets (e.g. crypto
++   keys), protection against Spectre variant 2 can be put in place by
++   disabling indirect branch speculation when the program is running
++   (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++
++3. Sandbox untrusted programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   Untrusted programs that could be a source of attacks can be cordoned
++   off by disabling their indirect branch speculation when they are run
++   (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++   This prevents untrusted programs from polluting the branch target
++   buffer.  All programs running in SECCOMP sandboxes have indirect
++   branch speculation restricted by default. This behavior can be
++   changed via the kernel command line and sysfs control files. See
++   :ref:`spectre_mitigation_control_command_line`.
++
++4. High security mode
++^^^^^^^^^^^^^^^^^^^^^
++
++   All Spectre variant 2 mitigations can be forced on
++   at boot time for all programs (See the "on" option in
++   :ref:`spectre_mitigation_control_command_line`).  This will add
++   overhead as indirect branch speculations for all programs will be
++   restricted.
++
++   On x86, the branch target buffer will be flushed with IBPB when switching
++   to a new program. STIBP is left on all the time to protect programs
++   against variant 2 attacks originating from programs running on
++   sibling threads.
++
++   Alternatively, STIBP can be used only when running programs
++   whose indirect branch speculation is explicitly disabled,
++   while IBPB is still used all the time when switching to a new
++   program to clear the branch target buffer (See "ibpb" option in
++   :ref:`spectre_mitigation_control_command_line`).  This "ibpb" option
++   has less performance cost than the "on" option, which leaves STIBP
++   on all the time.
++
++References on Spectre
++---------------------
++
++Intel white papers:
++
++.. _spec_ref1:
++
++[1] `Intel analysis of speculative execution side channels <https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/Intel-Analysis-of-Speculative-Execution-Side-Channels.pdf>`_.
++
++.. _spec_ref2:
++
++[2] `Bounds check bypass <https://software.intel.com/security-software-guidance/software-guidance/bounds-check-bypass>`_.
++
++.. _spec_ref3:
++
++[3] `Deep dive: Retpoline: A branch target injection mitigation <https://software.intel.com/security-software-guidance/insights/deep-dive-retpoline-branch-target-injection-mitigation>`_.
++
++.. _spec_ref4:
++
++[4] `Deep Dive: Single Thread Indirect Branch Predictors <https://software.intel.com/security-software-guidance/insights/deep-dive-single-thread-indirect-branch-predictors>`_.
++
++AMD white papers:
++
++.. _spec_ref5:
++
++[5] `AMD64 technology indirect branch control extension <https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf>`_.
++
++.. _spec_ref6:
++
++[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
++
++ARM white papers:
++
++.. _spec_ref7:
++
++[7] `Cache speculation side-channels <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/download-the-whitepaper>`_.
++
++.. _spec_ref8:
++
++[8] `Cache speculation issues update <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/latest-updates/cache-speculation-issues-update>`_.
++
++Google white paper:
++
++.. _spec_ref9:
++
++[9] `Retpoline: a software construct for preventing branch-target-injection <https://support.google.com/faqs/answer/7625886>`_.
++
++MIPS white paper:
++
++.. _spec_ref10:
++
++[10] `MIPS: response on speculative execution and side channel vulnerabilities <https://www.mips.com/blog/mips-response-on-speculative-execution-and-side-channel-vulnerabilities/>`_.
++
++Academic papers:
++
++.. _spec_ref11:
++
++[11] `Spectre Attacks: Exploiting Speculative Execution <https://spectreattack.com/spectre.pdf>`_.
++
++.. _spec_ref12:
++
++[12] `NetSpectre: Read Arbitrary Memory over Network <https://arxiv.org/abs/1807.10535>`_.
++
++.. _spec_ref13:
++
++[13] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://www.usenix.org/system/files/conference/woot18/woot18-paper-koruyeh.pdf>`_.
+diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+index ee3723beb701..33b38716b77f 100644
+--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
++++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+@@ -4,6 +4,7 @@ Required properties:
+  - compatible: Should be one of the following:
+    - "microchip,mcp2510" for MCP2510.
+    - "microchip,mcp2515" for MCP2515.
++   - "microchip,mcp25625" for MCP25625.
+  - reg: SPI chip select.
+  - clocks: The clock feeding the CAN controller.
+  - interrupt-parent: The parent interrupt controller.
+diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
+index c4dbe6f7cdae..0fda8f614110 100644
+--- a/Documentation/userspace-api/spec_ctrl.rst
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -47,6 +47,8 @@ If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+ available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+ misfeature will fail.
+ 
++.. _set_spec_ctrl:
++
+ PR_SET_SPECULATION_CTRL
+ -----------------------
+ 
+diff --git a/Makefile b/Makefile
+index c36e64bd9ae7..97c744513af0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 133
++SUBLEVEL = 134
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
+index 333daab7def0..93453fa48193 100644
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
+                                      MAX_DMA_ADDRESS);
+ }
+ 
+-static void *unw_hdr_alloc(unsigned long sz)
+-{
+-      return kmalloc(sz, GFP_KERNEL);
+-}
+-
+ static void init_unwind_table(struct unwind_table *table, const char *name,
+                             const void *core_start, unsigned long core_size,
+                             const void *init_start, unsigned long init_size,
+@@ -370,6 +365,10 @@ ret_err:
+ }
+ 
+ #ifdef CONFIG_MODULES
++static void *unw_hdr_alloc(unsigned long sz)
++{
++      return kmalloc(sz, GFP_KERNEL);
++}
+ 
+ static struct unwind_table *last_table;
+ 
+diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
+index 1ec8e0d80191..572fbd254690 100644
+--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
++++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
+@@ -197,7 +197,7 @@
+       bus-width = <4>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc1_pins>;
+-      cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
++      cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
+index 8ce541739b24..83e4fe595e37 100644
+--- a/arch/arm/boot/dts/am335x-wega.dtsi
++++ b/arch/arm/boot/dts/am335x-wega.dtsi
+@@ -157,7 +157,7 @@
+       bus-width = <4>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc1_pins>;
+-      cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
++      cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 036aeba4f02c..49f4bdc0d864 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -342,7 +342,7 @@
+                       pwm1: pwm@02080000 {
+                               compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+                               reg = <0x02080000 0x4000>;
+-                              interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks IMX6UL_CLK_PWM1>,
+                                        <&clks IMX6UL_CLK_PWM1>;
+                               clock-names = "ipg", "per";
+@@ -353,7 +353,7 @@
+                       pwm2: pwm@02084000 {
+                               compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+                               reg = <0x02084000 0x4000>;
+-                              interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks IMX6UL_CLK_PWM2>,
+                                        <&clks IMX6UL_CLK_PWM2>;
+                               clock-names = "ipg", "per";
+@@ -364,7 +364,7 @@
+                       pwm3: pwm@02088000 {
+                               compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+                               reg = <0x02088000 0x4000>;
+-                              interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks IMX6UL_CLK_PWM3>,
+                                        <&clks IMX6UL_CLK_PWM3>;
+                               clock-names = "ipg", "per";
+@@ -375,7 +375,7 @@
+                       pwm4: pwm@0208c000 {
+                               compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+                               reg = <0x0208c000 0x4000>;
+-                              interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks IMX6UL_CLK_PWM4>,
+                                        <&clks IMX6UL_CLK_PWM4>;
+                               clock-names = "ipg", "per";
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index 2f6ac1afa804..686e7e6f2eb3 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -1464,6 +1464,8 @@ static __init void da850_evm_init(void)
+       if (ret)
+               pr_warn("%s: dsp/rproc registration failed: %d\n",
+                       __func__, ret);
++
++      regulator_has_full_constraints();
+ }
+ 
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
+index 22440c05d66a..7120f93eab0b 100644
+--- a/arch/arm/mach-davinci/devices-da8xx.c
++++ b/arch/arm/mach-davinci/devices-da8xx.c
+@@ -699,6 +699,9 @@ static struct platform_device da8xx_lcdc_device = {
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(da8xx_lcdc_resources),
+       .resource       = da8xx_lcdc_resources,
++      .dev            = {
++              .coherent_dma_mask      = DMA_BIT_MASK(32),
++      }
+ };
+ 
+ int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
+diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
+index a2dd13217c89..2819c43fe754 100644
+--- a/arch/arm/mach-omap2/prm3xxx.c
++++ b/arch/arm/mach-omap2/prm3xxx.c
+@@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void)
+  * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
+  * No return value.
+  */
+-static void __init omap3xxx_prm_enable_io_wakeup(void)
++static void omap3xxx_prm_enable_io_wakeup(void)
+ {
+       if (prm_features & PRM_HAS_IO_WAKEUP)
+               omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h
+index 26143e3b7c26..69c3de90c536 100644
+--- a/arch/mips/include/uapi/asm/sgidefs.h
++++ b/arch/mips/include/uapi/asm/sgidefs.h
+@@ -11,14 +11,6 @@
+ #ifndef __ASM_SGIDEFS_H
+ #define __ASM_SGIDEFS_H
+ 
+-/*
+- * Using a Linux compiler for building Linux seems logic but not to
+- * everybody.
+- */
+-#ifndef __linux__
+-#error Use a Linux compiler or give up.
+-#endif
+-
+ /*
+  * Definitions for the ISA levels
+  *
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 2d58478c2745..9fee469d7130 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr)
+       return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ }
+ 
++static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
++{
++      register unsigned long reg0 asm("0") = size - 1;
++
++      asm volatile(
++              ".insn s,0xb2b00000,0(%1)" /* stfle */
++              : "+d" (reg0)
++              : "a" (stfle_fac_list)
++              : "memory", "cc");
++      return reg0;
++}
++
+ /**
+  * stfle - Store facility list extended
+  * @stfle_fac_list: array where facility list can be stored
+@@ -76,13 +88,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
+       memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+       if (S390_lowcore.stfl_fac_list & 0x01000000) {
+               /* More facility bits available with stfle */
+-              register unsigned long reg0 asm("0") = size - 1;
+-
+-              asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+-                           : "+d" (reg0)
+-                           : "a" (stfle_fac_list)
+-                           : "memory", "cc");
+-              nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
++              nr = __stfle_asm(stfle_fac_list, size);
++              nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
+       }
+       memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+       preempt_enable();
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 45b5c6c4a55e..7c67d8939f3e 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -117,26 +117,27 @@ unsigned long __head __startup_64(unsigned long physaddr,
+               pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
+               pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
+ 
+-              i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
+-              p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
+-              p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
++              i = physaddr >> P4D_SHIFT;
++              p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
++              p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+       } else {
+               i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+               pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
+               pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
+       }
+ 
+-      i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
+-      pud[i + 0] = (pudval_t)pmd + pgtable_flags;
+-      pud[i + 1] = (pudval_t)pmd + pgtable_flags;
++      i = physaddr >> PUD_SHIFT;
++      pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
++      pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+ 
+       pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+       pmd_entry += sme_get_me_mask();
+       pmd_entry +=  physaddr;
+ 
+       for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+-              int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
+-              pmd[idx] = pmd_entry + i * PMD_SIZE;
++              int idx = i + (physaddr >> PMD_SHIFT);
++
++              pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
+       }
+ 
+       /*
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index ed5c4cdf0a34..2a65ab291312 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -24,6 +24,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/export.h>
+ #include <linux/context_tracking.h>
++#include <linux/nospec.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -651,9 +652,11 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
+ {
+       struct thread_struct *thread = &tsk->thread;
+       unsigned long val = 0;
++      int index = n;
+ 
+       if (n < HBP_NUM) {
+-              struct perf_event *bp = thread->ptrace_bps[n];
++              struct perf_event *bp = thread->ptrace_bps[index];
++              index = array_index_nospec(index, HBP_NUM);
+ 
+               if (bp)
+                       val = bp->hw.info.address;
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index a5b802a12212..71d3fef1edc9 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -5,6 +5,7 @@
+ #include <linux/user.h>
+ #include <linux/regset.h>
+ #include <linux/syscalls.h>
++#include <linux/nospec.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/desc.h>
+@@ -220,6 +221,7 @@ int do_get_thread_area(struct task_struct *p, int idx,
+                      struct user_desc __user *u_info)
+ {
+       struct user_desc info;
++      int index;
+ 
+       if (idx == -1 && get_user(idx, &u_info->entry_number))
+               return -EFAULT;
+@@ -227,8 +229,11 @@ int do_get_thread_area(struct task_struct *p, int idx,
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+ 
+-      fill_user_desc(&info, idx,
+-                     &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
++      index = idx - GDT_ENTRY_TLS_MIN;
++      index = array_index_nospec(index,
++                      GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);
++
++      fill_user_desc(&info, idx, &p->thread.tls_array[index]);
+ 
+       if (copy_to_user(u_info, &info, sizeof(info)))
+               return -EFAULT;
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 7d45ac451745..e65b0da1007b 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -3760,6 +3760,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+               unsigned long flags;
+ 
+               spin_lock_irqsave(&bfqd->lock, flags);
++              bfqq->bic = NULL;
+               bfq_exit_bfqq(bfqd, bfqq);
+               bic_set_bfqq(bic, NULL, is_sync);
+               spin_unlock_irqrestore(&bfqd->lock, flags);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 96a0f940e54d..1af9f36f89cf 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -3876,6 +3876,8 @@ retry:
+               case BINDER_WORK_TRANSACTION_COMPLETE: {
+                       binder_inner_proc_unlock(proc);
+                       cmd = BR_TRANSACTION_COMPLETE;
++                      kfree(w);
++                      binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+                       if (put_user(cmd, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+@@ -3884,8 +3886,6 @@ retry:
+                       binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+                                    "%d:%d BR_TRANSACTION_COMPLETE\n",
+                                    proc->pid, thread->pid);
+-                      kfree(w);
+-                      binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+               } break;
+               case BINDER_WORK_NODE: {
+                      struct binder_node *node = container_of(w, struct binder_node, work);
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 07532d83be0b..e405ea3ca8d8 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -669,7 +669,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
+ 
+ static int __init cacheinfo_sysfs_init(void)
+ {
+-      return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
++      return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
++                               "base/cacheinfo:online",
+                                cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+ }
+ device_initcall(cacheinfo_sysfs_init);
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index 82e4d5cccf84..2df8564f08a0 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -215,6 +215,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
+ {
+       struct omap_clkctrl_provider *provider = data;
+       struct omap_clkctrl_clk *entry;
++      bool found = false;
+ 
+       if (clkspec->args_count != 2)
+               return ERR_PTR(-EINVAL);
+@@ -224,11 +225,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
+ 
+       list_for_each_entry(entry, &provider->clocks, node) {
+               if (entry->reg_offset == clkspec->args[0] &&
+-                  entry->bit_offset == clkspec->args[1])
++                  entry->bit_offset == clkspec->args[1]) {
++                      found = true;
+                       break;
++              }
+       }
+ 
+-      if (!entry)
++      if (!found)
+               return ERR_PTR(-EINVAL);
+ 
+       return entry->clk;
+diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
+index 874ddf5e9087..dbf80b55c2a4 100644
+--- a/drivers/crypto/nx/nx-842-powernv.c
++++ b/drivers/crypto/nx/nx-842-powernv.c
+@@ -34,8 +34,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
+ #define WORKMEM_ALIGN (CRB_ALIGN)
+ #define CSB_WAIT_MAX  (5000) /* ms */
+ #define VAS_RETRIES   (10)
+-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+-#define MAX_CREDITS_PER_RXFIFO        (1024)
+ 
+ struct nx842_workmem {
+       /* Below fields must be properly aligned */
+@@ -801,7 +799,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+       rxattr.lnotify_lpid = lpid;
+       rxattr.lnotify_pid = pid;
+       rxattr.lnotify_tid = tid;
+-      rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
++      /*
++       * Maximum RX window credits can not be more than #CRBs in
++       * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
++       */
++      rxattr.wcreds_max = fifo_size / CRB_SIZE;
+ 
+       /*
+        * Open a VAS receice window which is used to configure RxFIFO
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 4388f4e3840c..1f8fe1795964 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -2185,7 +2185,7 @@ static struct talitos_alg_template driver_algs[] = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+-                                                 "cbc-aes-talitos",
++                                                 "cbc-aes-talitos-hsna",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2229,7 +2229,7 @@ static struct talitos_alg_template driver_algs[] = {
+                               .cra_name = "authenc(hmac(sha1),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+-                                                 "cbc-3des-talitos",
++                                                 "cbc-3des-talitos-hsna",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2271,7 +2271,7 @@ static struct talitos_alg_template driver_algs[] = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+-                                                 "cbc-aes-talitos",
++                                                 "cbc-aes-talitos-hsna",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2315,7 +2315,7 @@ static struct talitos_alg_template driver_algs[] = {
+                               .cra_name = "authenc(hmac(sha224),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+-                                                 "cbc-3des-talitos",
++                                                 "cbc-3des-talitos-hsna",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2357,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+-                                                 "cbc-aes-talitos",
++                                                 "cbc-aes-talitos-hsna",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2401,7 +2401,7 @@ static struct talitos_alg_template driver_algs[] = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+-                                                 "cbc-3des-talitos",
++                                                 "cbc-3des-talitos-hsna",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2527,7 +2527,7 @@ static struct talitos_alg_template driver_algs[] = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+-                                                 "cbc-aes-talitos",
++                                                 "cbc-aes-talitos-hsna",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+@@ -2569,7 +2569,7 @@ static struct talitos_alg_template driver_algs[] = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+-                                                 "cbc-3des-talitos",
++                                                 "cbc-3des-talitos-hsna",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
+index 50793fda7819..e3d86aa1ad5d 100644
+--- a/drivers/firmware/efi/efi-bgrt.c
++++ b/drivers/firmware/efi/efi-bgrt.c
+@@ -50,11 +50,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
+                      bgrt->version);
+               goto out;
+       }
+-      if (bgrt->status & 0xfe) {
+-              pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
+-                     bgrt->status);
+-              goto out;
+-      }
+       if (bgrt->image_type != 0) {
+               pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
+                      bgrt->image_type);
+diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
+index 0f05b8d8fefa..b829fde80f7b 100644
+--- a/drivers/gpu/drm/drm_bufs.c
++++ b/drivers/gpu/drm/drm_bufs.c
+@@ -1321,7 +1321,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
+                                .size = from->buf_size,
+                                .low_mark = from->low_mark,
+                                .high_mark = from->high_mark};
+-      return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
++
++      if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
++              return -EFAULT;
++      return 0;
+ }
+ 
+ int drm_legacy_infobufs(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index f8e96e648acf..bfeeb6a56135 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -372,7 +372,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
+                             .size = from->buf_size,
+                             .low_mark = from->low_mark,
+                             .high_mark = from->high_mark};
+-      return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
++
++      if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
++              return -EFAULT;
++      return 0;
+ }
+ 
+ static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index b45ac6bc8add..b428c3da7576 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -43,10 +43,16 @@ static const struct file_operations udl_driver_fops = {
+       .llseek = noop_llseek,
+ };
+ 
++static void udl_driver_release(struct drm_device *dev)
++{
++      udl_fini(dev);
++      udl_modeset_cleanup(dev);
++      drm_dev_fini(dev);
++      kfree(dev);
++}
++
+ static struct drm_driver driver = {
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+-      .load = udl_driver_load,
+-      .unload = udl_driver_unload,
+       .release = udl_driver_release,
+ 
+       /* gem hooks */
+@@ -70,28 +76,56 @@ static struct drm_driver driver = {
+       .patchlevel = DRIVER_PATCHLEVEL,
+ };
+ 
++static struct udl_device *udl_driver_create(struct usb_interface *interface)
++{
++      struct usb_device *udev = interface_to_usbdev(interface);
++      struct udl_device *udl;
++      int r;
++
++      udl = kzalloc(sizeof(*udl), GFP_KERNEL);
++      if (!udl)
++              return ERR_PTR(-ENOMEM);
++
++      r = drm_dev_init(&udl->drm, &driver, &interface->dev);
++      if (r) {
++              kfree(udl);
++              return ERR_PTR(r);
++      }
++
++      udl->udev = udev;
++      udl->drm.dev_private = udl;
++
++      r = udl_init(udl);
++      if (r) {
++              drm_dev_fini(&udl->drm);
++              kfree(udl);
++              return ERR_PTR(r);
++      }
++
++      usb_set_intfdata(interface, udl);
++      return udl;
++}
++
+ static int udl_usb_probe(struct usb_interface *interface,
+                        const struct usb_device_id *id)
+ {
+-      struct usb_device *udev = interface_to_usbdev(interface);
+-      struct drm_device *dev;
+       int r;
++      struct udl_device *udl;
+ 
+-      dev = drm_dev_alloc(&driver, &interface->dev);
+-      if (IS_ERR(dev))
+-              return PTR_ERR(dev);
++      udl = udl_driver_create(interface);
++      if (IS_ERR(udl))
++              return PTR_ERR(udl);
+ 
+-      r = drm_dev_register(dev, (unsigned long)udev);
++      r = drm_dev_register(&udl->drm, 0);
+       if (r)
+               goto err_free;
+ 
+-      usb_set_intfdata(interface, dev);
+-      DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
++      DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
+ 
+       return 0;
+ 
+ err_free:
+-      drm_dev_unref(dev);
++      drm_dev_unref(&udl->drm);
+       return r;
+ }
+ 
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index 307455dd6526..d5a5dcd15dd8 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -49,8 +49,8 @@ struct urb_list {
+ struct udl_fbdev;
+ 
+ struct udl_device {
++      struct drm_device drm;
+       struct device *dev;
+-      struct drm_device *ddev;
+       struct usb_device *udev;
+       struct drm_crtc *crtc;
+ 
+@@ -68,6 +68,8 @@ struct udl_device {
+       atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+ 
++#define to_udl(x) container_of(x, struct udl_device, drm)
++
+ struct udl_gem_object {
+       struct drm_gem_object base;
+       struct page **pages;
+@@ -99,9 +101,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
+ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+ void udl_urb_completion(struct urb *urb);
+ 
+-int udl_driver_load(struct drm_device *dev, unsigned long flags);
+-void udl_driver_unload(struct drm_device *dev);
+-void udl_driver_release(struct drm_device *dev);
++int udl_init(struct udl_device *udl);
++void udl_fini(struct drm_device *dev);
+ 
+ int udl_fbdev_init(struct drm_device *dev);
+ void udl_fbdev_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 491f1892b50e..f41fd0684ce4 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+                     int width, int height)
+ {
+       struct drm_device *dev = fb->base.dev;
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       int i, ret;
+       char *cmd;
+       cycles_t start_cycles, end_cycles;
+@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
+ {
+       struct udl_fbdev *ufbdev = info->par;
+       struct drm_device *dev = ufbdev->ufb.base.dev;
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+ 
+       /* If the USB device is gone, we don't accept new opens */
+-      if (drm_dev_is_unplugged(udl->ddev))
++      if (drm_dev_is_unplugged(&udl->drm))
+               return -ENODEV;
+ 
+       ufbdev->fb_count++;
+@@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
+ 
+ int udl_fbdev_init(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       int bpp_sel = fb_bpp;
+       struct udl_fbdev *ufbdev;
+       int ret;
+@@ -480,7 +480,7 @@ free:
+ 
+ void udl_fbdev_cleanup(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       if (!udl->fbdev)
+               return;
+ 
+@@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
+ 
+ void udl_fbdev_unplug(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       struct udl_fbdev *ufbdev;
+       if (!udl->fbdev)
+               return;
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index 60866b422f81..124428f33e1e 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -28,7 +28,7 @@
+ static int udl_parse_vendor_descriptor(struct drm_device *dev,
+                                      struct usb_device *usbdev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       char *desc;
+       char *buf;
+       char *desc_end;
+@@ -164,7 +164,7 @@ void udl_urb_completion(struct urb *urb)
+ 
+ static void udl_free_urb_list(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       int count = udl->urbs.count;
+       struct list_head *node;
+       struct urb_node *unode;
+@@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev)
+ 
+ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       struct urb *urb;
+       struct urb_node *unode;
+       char *buf;
+@@ -262,7 +262,7 @@ retry:
+ 
+ struct urb *udl_get_urb(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       int ret = 0;
+       struct list_head *entry;
+       struct urb_node *unode;
+@@ -296,7 +296,7 @@ error:
+ 
+ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+       int ret;
+ 
+       BUG_ON(len > udl->urbs.size);
+@@ -311,20 +311,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+       return ret;
+ }
+ 
+-int udl_driver_load(struct drm_device *dev, unsigned long flags)
++int udl_init(struct udl_device *udl)
+ {
+-      struct usb_device *udev = (void*)flags;
+-      struct udl_device *udl;
++      struct drm_device *dev = &udl->drm;
+       int ret = -ENOMEM;
+ 
+       DRM_DEBUG("\n");
+-      udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
+-      if (!udl)
+-              return -ENOMEM;
+-
+-      udl->udev = udev;
+-      udl->ddev = dev;
+-      dev->dev_private = udl;
+ 
+       if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
+               ret = -ENODEV;
+@@ -359,7 +351,6 @@ err_fb:
+ err:
+       if (udl->urbs.count)
+               udl_free_urb_list(dev);
+-      kfree(udl);
+       DRM_ERROR("%d\n", ret);
+       return ret;
+ }
+@@ -370,20 +361,12 @@ int udl_drop_usb(struct drm_device *dev)
+       return 0;
+ }
+ 
+-void udl_driver_unload(struct drm_device *dev)
++void udl_fini(struct drm_device *dev)
+ {
+-      struct udl_device *udl = dev->dev_private;
++      struct udl_device *udl = to_udl(dev);
+ 
+       if (udl->urbs.count)
+               udl_free_urb_list(dev);
+ 
+       udl_fbdev_cleanup(dev);
+-      kfree(udl);
+-}
+-
+-void udl_driver_release(struct drm_device *dev)
+-{
+-      udl_modeset_cleanup(dev);
+-      drm_dev_fini(dev);
+-      kfree(dev);
+ }
+diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
+index 2165f3dd328b..842c0235471d 100644
+--- a/drivers/input/keyboard/imx_keypad.c
++++ b/drivers/input/keyboard/imx_keypad.c
+@@ -530,11 +530,12 @@ static int imx_keypad_probe(struct platform_device *pdev)
+       return 0;
+ }
+ 
+-static int __maybe_unused imx_kbd_suspend(struct device *dev)
++static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev)
+ {
+       struct platform_device *pdev = to_platform_device(dev);
+       struct imx_keypad *kbd = platform_get_drvdata(pdev);
+       struct input_dev *input_dev = kbd->input_dev;
++      unsigned short reg_val = readw(kbd->mmio_base + KPSR);
+ 
+       /* imx kbd can wake up system even clock is disabled */
+       mutex_lock(&input_dev->mutex);
+@@ -544,13 +545,20 @@ static int __maybe_unused imx_kbd_suspend(struct device *dev)
+ 
+       mutex_unlock(&input_dev->mutex);
+ 
+-      if (device_may_wakeup(&pdev->dev))
++      if (device_may_wakeup(&pdev->dev)) {
++              if (reg_val & KBD_STAT_KPKD)
++                      reg_val |= KBD_STAT_KRIE;
++              if (reg_val & KBD_STAT_KPKR)
++                      reg_val |= KBD_STAT_KDIE;
++              writew(reg_val, kbd->mmio_base + KPSR);
++
+               enable_irq_wake(kbd->irq);
++      }
+ 
+       return 0;
+ }
+ 
+-static int __maybe_unused imx_kbd_resume(struct device *dev)
++static int __maybe_unused imx_kbd_noirq_resume(struct device *dev)
+ {
+       struct platform_device *pdev = to_platform_device(dev);
+       struct imx_keypad *kbd = platform_get_drvdata(pdev);
+@@ -574,7 +582,9 @@ err_clk:
+       return ret;
+ }
+ 
+-static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
++static const struct dev_pm_ops imx_kbd_pm_ops = {
++      SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_kbd_noirq_suspend, imx_kbd_noirq_resume)
++};
+ 
+ static struct platform_driver imx_keypad_driver = {
+       .driver         = {
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index fda33fc3ffcc..ab4888d043f0 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1191,6 +1191,8 @@ static const char * const middle_button_pnp_ids[] = {
+       "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
++      "LEN0407",
++      "LEN0408",
+       NULL
+ };
+ 
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a5f279da83a1..1a6a05c45ee7 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -176,6 +176,7 @@ static const char * const smbus_pnp_ids[] = {
+       "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+       "LEN0073", /* X1 Carbon G5 (Elantech) */
+       "LEN0092", /* X1 Carbon 6 */
++      "LEN0093", /* T480 */
+       "LEN0096", /* X280 */
+       "LEN0097", /* X280 -> ALPS trackpoint */
+       "LEN200f", /* T450s */
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 8573c70a1880..e705799976c2 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -276,8 +276,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+               BUG();
+       }
+ 
+-      DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+-              block);
++      DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
++                  type_str, block);
+ 
+       if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+               DMERR("%s: reached maximum errors", v->data_dev->name);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b27a69388dcd..764ed9c46629 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7605,9 +7605,9 @@ static void status_unused(struct seq_file *seq)
+ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ {
+       sector_t max_sectors, resync, res;
+-      unsigned long dt, db;
+-      sector_t rt;
+-      int scale;
++      unsigned long dt, db = 0;
++      sector_t rt, curr_mark_cnt, resync_mark_cnt;
++      int scale, recovery_active;
+       unsigned int per_milli;
+ 
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+@@ -7677,22 +7677,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+        * db: blocks written from mark until now
+        * rt: remaining time
+        *
+-       * rt is a sector_t, so could be 32bit or 64bit.
+-       * So we divide before multiply in case it is 32bit and close
+-       * to the limit.
+-       * We scale the divisor (db) by 32 to avoid losing precision
+-       * near the end of resync when the number of remaining sectors
+-       * is close to 'db'.
+-       * We then divide rt by 32 after multiplying by db to compensate.
+-       * The '+1' avoids division by zero if db is very small.
++       * rt is a sector_t, which is always 64bit now. We are keeping
++       * the original algorithm, but it is not really necessary.
++       *
++       * Original algorithm:
++       *   So we divide before multiply in case it is 32bit and close
++       *   to the limit.
++       *   We scale the divisor (db) by 32 to avoid losing precision
++       *   near the end of resync when the number of remaining sectors
++       *   is close to 'db'.
++       *   We then divide rt by 32 after multiplying by db to compensate.
++       *   The '+1' avoids division by zero if db is very small.
+        */
+       dt = ((jiffies - mddev->resync_mark) / HZ);
+       if (!dt) dt++;
+-      db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
+-              - mddev->resync_mark_cnt;
++
++      curr_mark_cnt = mddev->curr_mark_cnt;
++      recovery_active = atomic_read(&mddev->recovery_active);
++      resync_mark_cnt = mddev->resync_mark_cnt;
++
++      if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
++              db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
+ 
+       rt = max_sectors - resync;    /* number of remaining sectors */
+-      sector_div(rt, db/32+1);
++      rt = div64_u64(rt, db/32+1);
+       rt *= dt;
+       rt >>= 5;
+ 
+diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
+index 21d0fa592145..bc089e634a75 100644
+--- a/drivers/misc/vmw_vmci/vmci_context.c
++++ b/drivers/misc/vmw_vmci/vmci_context.c
+@@ -29,6 +29,9 @@
+ #include "vmci_driver.h"
+ #include "vmci_event.h"
+ 
++/* Use a wide upper bound for the maximum contexts. */
++#define VMCI_MAX_CONTEXTS 2000
++
+ /*
+  * List of current VMCI contexts.  Contexts can be added by
+  * vmci_ctx_create() and removed via vmci_ctx_destroy().
+@@ -125,19 +128,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+       /* Initialize host-specific VMCI context. */
+       init_waitqueue_head(&context->host_context.wait_queue);
+ 
+-      context->queue_pair_array = vmci_handle_arr_create(0);
++      context->queue_pair_array =
++              vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
+       if (!context->queue_pair_array) {
+               error = -ENOMEM;
+               goto err_free_ctx;
+       }
+ 
+-      context->doorbell_array = vmci_handle_arr_create(0);
++      context->doorbell_array =
++              vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+       if (!context->doorbell_array) {
+               error = -ENOMEM;
+               goto err_free_qp_array;
+       }
+ 
+-      context->pending_doorbell_array = vmci_handle_arr_create(0);
++      context->pending_doorbell_array =
++              vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+       if (!context->pending_doorbell_array) {
+               error = -ENOMEM;
+               goto err_free_db_array;
+@@ -212,7 +218,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+        * We create an array to hold the subscribers we find when
+        * scanning through all contexts.
+        */
+-      subscriber_array = vmci_handle_arr_create(0);
++      subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
+       if (subscriber_array == NULL)
+               return VMCI_ERROR_NO_MEM;
+ 
+@@ -631,20 +637,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+ 
+       spin_lock(&context->lock);
+ 
+-      list_for_each_entry(n, &context->notifier_list, node) {
+-              if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+-                      exists = true;
+-                      break;
++      if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
++              list_for_each_entry(n, &context->notifier_list, node) {
++                      if (vmci_handle_is_equal(n->handle, notifier->handle)) {
++                              exists = true;
++                              break;
++                      }
+               }
+-      }
+ 
+-      if (exists) {
+-              kfree(notifier);
+-              result = VMCI_ERROR_ALREADY_EXISTS;
++              if (exists) {
++                      kfree(notifier);
++                      result = VMCI_ERROR_ALREADY_EXISTS;
++              } else {
++                      list_add_tail_rcu(&notifier->node,
++                                        &context->notifier_list);
++                      context->n_notifiers++;
++                      result = VMCI_SUCCESS;
++              }
+       } else {
+-              list_add_tail_rcu(&notifier->node, &context->notifier_list);
+-              context->n_notifiers++;
+-              result = VMCI_SUCCESS;
++              kfree(notifier);
++              result = VMCI_ERROR_NO_MEM;
+       }
+ 
+       spin_unlock(&context->lock);
+@@ -729,8 +741,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+                                       u32 *buf_size, void **pbuf)
+ {
+       struct dbell_cpt_state *dbells;
+-      size_t n_doorbells;
+-      int i;
++      u32 i, n_doorbells;
+ 
+       n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+       if (n_doorbells > 0) {
+@@ -868,7 +879,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id,
+       spin_lock(&context->lock);
+ 
+       *db_handle_array = context->pending_doorbell_array;
+-      context->pending_doorbell_array = vmci_handle_arr_create(0);
++      context->pending_doorbell_array =
++              vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+       if (!context->pending_doorbell_array) {
+               context->pending_doorbell_array = *db_handle_array;
+               *db_handle_array = NULL;
+@@ -950,12 +962,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+               return VMCI_ERROR_NOT_FOUND;
+ 
+       spin_lock(&context->lock);
+-      if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+-              vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+-              result = VMCI_SUCCESS;
+-      } else {
++      if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
++              result = vmci_handle_arr_append_entry(&context->doorbell_array,
++                                                    handle);
++      else
+               result = VMCI_ERROR_DUPLICATE_ENTRY;
+-      }
+ 
+       spin_unlock(&context->lock);
+       vmci_ctx_put(context);
+@@ -1091,15 +1102,16 @@ int vmci_ctx_notify_dbell(u32 src_cid,
+                       if (!vmci_handle_arr_has_entry(
+                                       dst_context->pending_doorbell_array,
+                                       handle)) {
+-                              vmci_handle_arr_append_entry(
++                              result = vmci_handle_arr_append_entry(
+                                       &dst_context->pending_doorbell_array,
+                                       handle);
+-
+-                              ctx_signal_notify(dst_context);
+-                              wake_up(&dst_context->host_context.wait_queue);
+-
++                              if (result == VMCI_SUCCESS) {
++                                      ctx_signal_notify(dst_context);
++                                      wake_up(&dst_context->host_context.wait_queue);
++                              }
++                      } else {
++                              result = VMCI_SUCCESS;
+                       }
+-                      result = VMCI_SUCCESS;
+               }
+               spin_unlock(&dst_context->lock);
+       }
+@@ -1126,13 +1138,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+       if (context == NULL || vmci_handle_is_invalid(handle))
+               return VMCI_ERROR_INVALID_ARGS;
+ 
+-      if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+-              vmci_handle_arr_append_entry(&context->queue_pair_array,
+-                                           handle);
+-              result = VMCI_SUCCESS;
+-      } else {
++      if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
++              result = vmci_handle_arr_append_entry(
++                      &context->queue_pair_array, handle);
++      else
+               result = VMCI_ERROR_DUPLICATE_ENTRY;
+-      }
+ 
+       return result;
+ }
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
+index 344973a0fb0a..917e18a8af95 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
+@@ -16,24 +16,29 @@
+ #include <linux/slab.h>
+ #include "vmci_handle_array.h"
+ 
+-static size_t handle_arr_calc_size(size_t capacity)
++static size_t handle_arr_calc_size(u32 capacity)
+ {
+-      return sizeof(struct vmci_handle_arr) +
++      return VMCI_HANDLE_ARRAY_HEADER_SIZE +
+           capacity * sizeof(struct vmci_handle);
+ }
+ 
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
+ {
+       struct vmci_handle_arr *array;
+ 
++      if (max_capacity == 0 || capacity > max_capacity)
++              return NULL;
++
+       if (capacity == 0)
+-              capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
++              capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
++                             max_capacity);
+ 
+       array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+       if (!array)
+               return NULL;
+ 
+       array->capacity = capacity;
++      array->max_capacity = max_capacity;
+       array->size = 0;
+ 
+       return array;
+@@ -44,27 +49,34 @@ void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+       kfree(array);
+ }
+ 
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+-                                struct vmci_handle handle)
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++                               struct vmci_handle handle)
+ {
+       struct vmci_handle_arr *array = *array_ptr;
+ 
+       if (unlikely(array->size >= array->capacity)) {
+               /* reallocate. */
+               struct vmci_handle_arr *new_array;
+-              size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+-              size_t new_size = handle_arr_calc_size(new_capacity);
++              u32 capacity_bump = min(array->max_capacity - array->capacity,
++                                      array->capacity);
++              size_t new_size = handle_arr_calc_size(array->capacity +
++                                                     capacity_bump);
++
++              if (array->size >= array->max_capacity)
++                      return VMCI_ERROR_NO_MEM;
+ 
+               new_array = krealloc(array, new_size, GFP_ATOMIC);
+               if (!new_array)
+-                      return;
++                      return VMCI_ERROR_NO_MEM;
+ 
+-              new_array->capacity = new_capacity;
++              new_array->capacity += capacity_bump;
+               *array_ptr = array = new_array;
+       }
+ 
+       array->entries[array->size] = handle;
+       array->size++;
++
++      return VMCI_SUCCESS;
+ }
+ 
+ /*
+@@ -74,7 +86,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+                                               struct vmci_handle entry_handle)
+ {
+       struct vmci_handle handle = VMCI_INVALID_HANDLE;
+-      size_t i;
++      u32 i;
+ 
+       for (i = 0; i < array->size; i++) {
+               if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+@@ -109,7 +121,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+  * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+  */
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
+ {
+       if (unlikely(index >= array->size))
+               return VMCI_INVALID_HANDLE;
+@@ -120,7 +132,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+                              struct vmci_handle entry_handle)
+ {
+-      size_t i;
++      u32 i;
+ 
+       for (i = 0; i < array->size; i++)
+               if (vmci_handle_is_equal(array->entries[i], entry_handle))
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
+index b5f3a7f98cf1..0fc58597820e 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
+@@ -17,32 +17,41 @@
+ #define _VMCI_HANDLE_ARRAY_H_
+ 
+ #include <linux/vmw_vmci_defs.h>
++#include <linux/limits.h>
+ #include <linux/types.h>
+ 
+-#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+-#define VMCI_ARR_CAP_MULT 2   /* Array capacity multiplier */
+-
+ struct vmci_handle_arr {
+-      size_t capacity;
+-      size_t size;
++      u32 capacity;
++      u32 max_capacity;
++      u32 size;
++      u32 pad;
+       struct vmci_handle entries[];
+ };
+ 
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
++#define VMCI_HANDLE_ARRAY_HEADER_SIZE                         \
++      offsetof(struct vmci_handle_arr, entries)
++/* Select a default capacity that results in a 64 byte sized array */
++#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY                    6
++/* Make sure that the max array size can be expressed by a u32 */
++#define VMCI_HANDLE_ARRAY_MAX_CAPACITY                                \
++      ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) /        \
++      sizeof(struct vmci_handle))
++
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
+ void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+-                                struct vmci_handle handle);
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++                               struct vmci_handle handle);
+ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+                                               struct vmci_handle
+                                               entry_handle);
+ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index);
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+                              struct vmci_handle entry_handle);
+ struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+ 
+-static inline size_t vmci_handle_arr_get_size(
++static inline u32 vmci_handle_arr_get_size(
+       const struct vmci_handle_arr *array)
+ {
+       return array->size;
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index d3ce904e929e..ebad93ac8f11 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -818,6 +818,27 @@ static int m_can_poll(struct napi_struct *napi, int quota)
+       if (!irqstatus)
+               goto end;
+ 
++      /* Errata workaround for issue "Needless activation of MRAF irq"
++       * During frame reception while the MCAN is in Error Passive state
++       * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
++       * it may happen that MCAN_IR.MRAF is set although there was no
++       * Message RAM access failure.
++       * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
++       * The Message RAM Access Failure interrupt routine needs to check
++       * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
++       * In this case, reset MCAN_IR.MRAF. No further action is required.
++       */
++      if ((priv->version <= 31) && (irqstatus & IR_MRAF) &&
++          (m_can_read(priv, M_CAN_ECR) & ECR_RP)) {
++              struct can_berr_counter bec;
++
++              __m_can_get_berr_counter(dev, &bec);
++              if (bec.rxerr == 127) {
++                      m_can_write(priv, M_CAN_IR, IR_MRAF);
++                      irqstatus &= ~IR_MRAF;
++              }
++      }
++
+       psr = m_can_read(priv, M_CAN_PSR);
+       if (irqstatus & IR_ERR_STATE)
+               work_done += m_can_handle_state_errors(dev, psr);
+diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
+index 8f2e0dd7b756..792e9c6c4a2f 100644
+--- a/drivers/net/can/spi/Kconfig
++++ b/drivers/net/can/spi/Kconfig
+@@ -8,9 +8,10 @@ config CAN_HI311X
+         Driver for the Holt HI311x SPI CAN controllers.
+ 
+ config CAN_MCP251X
+-      tristate "Microchip MCP251x SPI CAN controllers"
++      tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
+       depends on HAS_DMA
+       ---help---
+-        Driver for the Microchip MCP251x SPI CAN controllers.
++        Driver for the Microchip MCP251x and MCP25625 SPI CAN
++        controllers.
+ 
+ endmenu
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index f3f05fea8e1f..d8c448beab24 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1,5 +1,5 @@
+ /*
+- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
++ * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
+  *
+  * MCP2510 support and bug fixes by Christian Pellegrin
+  * <chrip...@evolware.org>
+@@ -41,7 +41,7 @@
+  * static struct spi_board_info spi_board_info[] = {
+  *         {
+  *                 .modalias = "mcp2510",
+- *                    // or "mcp2515" depending on your controller
++ *                    // "mcp2515" or "mcp25625" depending on your controller
+  *                 .platform_data = &mcp251x_info,
+  *                 .irq = IRQ_EINT13,
+  *                 .max_speed_hz = 2*1000*1000,
+@@ -238,6 +238,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = {
+ enum mcp251x_model {
+       CAN_MCP251X_MCP2510     = 0x2510,
+       CAN_MCP251X_MCP2515     = 0x2515,
++      CAN_MCP251X_MCP25625    = 0x25625,
+ };
+ 
+ struct mcp251x_priv {
+@@ -280,7 +281,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \
+ }
+ 
+ MCP251X_IS(2510);
+-MCP251X_IS(2515);
+ 
+ static void mcp251x_clean(struct net_device *net)
+ {
+@@ -640,7 +640,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
+ 
+       /* Wait for oscillator startup timer after reset */
+       mdelay(MCP251X_OST_DELAY_MS);
+-      
++
+       reg = mcp251x_read_reg(spi, CANSTAT);
+       if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+               return -ENODEV;
+@@ -821,9 +821,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+               /* receive buffer 0 */
+               if (intf & CANINTF_RX0IF) {
+                       mcp251x_hw_rx(spi, 0);
+-                      /*
+-                       * Free one buffer ASAP
+-                       * (The MCP2515 does this automatically.)
++                      /* Free one buffer ASAP
++                       * (The MCP2515/25625 does this automatically.)
+                        */
+                       if (mcp251x_is_2510(spi))
+                       mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+@@ -832,7 +831,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+               /* receive buffer 1 */
+               if (intf & CANINTF_RX1IF) {
+                       mcp251x_hw_rx(spi, 1);
+-                      /* the MCP2515 does this automatically */
++                      /* The MCP2515/25625 does this automatically. */
+                       if (mcp251x_is_2510(spi))
+                               clear_intf |= CANINTF_RX1IF;
+               }
+@@ -1007,6 +1006,10 @@ static const struct of_device_id mcp251x_of_match[] = {
+               .compatible     = "microchip,mcp2515",
+               .data           = (void *)CAN_MCP251X_MCP2515,
+       },
++      {
++              .compatible     = "microchip,mcp25625",
++              .data           = (void *)CAN_MCP251X_MCP25625,
++      },
+       { }
+ };
+ MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+@@ -1020,6 +1023,10 @@ static const struct spi_device_id mcp251x_id_table[] = {
+               .name           = "mcp2515",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+       },
++      {
++              .name           = "mcp25625",
++              .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP25625,
++      },
+       { }
+ };
+ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+@@ -1260,5 +1267,5 @@ module_spi_driver(mcp251x_can_driver);
+ 
+ MODULE_AUTHOR("Chris Elston <cels...@katalix.com>, "
+             "Christian Pellegrin <chrip...@evolware.org>");
+-MODULE_DESCRIPTION("Microchip 251x CAN driver");
++MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+index 8c8a0ec3d6e9..f260bd30c73a 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+@@ -416,7 +416,7 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+                * VTU DBNum[7:4] are located in VTU Operation 11:8
+                */
+               op |= entry->fid & 0x000f;
+-              op |= (entry->fid & 0x00f0) << 8;
++              op |= (entry->fid & 0x00f0) << 4;
+       }
+ 
+       return mv88e6xxx_g1_vtu_op(chip, op);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index 3fd1085a093f..65bc1929d1a8 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -1581,7 +1581,8 @@ static int bnx2x_get_module_info(struct net_device *dev,
+       }
+ 
+       if (!sff8472_comp ||
+-          (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
++          (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) ||
++          !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) {
+               modinfo->type = ETH_MODULE_SFF_8079;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+       } else {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+index b7d251108c19..7115f5025664 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+@@ -62,6 +62,7 @@
+ #define SFP_EEPROM_DIAG_TYPE_ADDR             0x5c
+ #define SFP_EEPROM_DIAG_TYPE_SIZE             1
+ #define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ               (1<<2)
++#define SFP_EEPROM_DDM_IMPLEMENTED            (1<<6)
+ #define SFP_EEPROM_SFF_8472_COMP_ADDR         0x5e
+ #define SFP_EEPROM_SFF_8472_COMP_SIZE         1
+ 
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
+index 23f6b60030c5..8c16298a252d 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
+@@ -854,7 +854,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+ 
+                       if (droq->ops.poll_mode) {
+                               droq->ops.napi_fn(droq);
+-                              oct_priv->napi_mask |= (1 << oq_no);
++                              oct_priv->napi_mask |= BIT_ULL(oq_no);
+                       } else {
+                               tasklet_schedule(&oct_priv->droq_tasklet);
+                       }
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 6ce7b8435ace..f66b246acaea 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -893,7 +893,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+                        u64 *data)
+ {
+       struct be_adapter *adapter = netdev_priv(netdev);
+-      int status;
++      int status, cnt;
+       u8 link_status = 0;
+ 
+       if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+@@ -904,6 +904,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ 
+       memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+ 
++      /* check link status before offline tests */
++      link_status = netif_carrier_ok(netdev);
++
+       if (test->flags & ETH_TEST_FL_OFFLINE) {
+               if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
+                       test->flags |= ETH_TEST_FL_FAILED;
+@@ -924,13 +927,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+               test->flags |= ETH_TEST_FL_FAILED;
+       }
+ 
+-      status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+-      if (status) {
+-              test->flags |= ETH_TEST_FL_FAILED;
+-              data[4] = -1;
+-      } else if (!link_status) {
++      /* link status was down prior to test */
++      if (!link_status) {
+               test->flags |= ETH_TEST_FL_FAILED;
+               data[4] = 1;
++              return;
++      }
++
++      for (cnt = 10; cnt; cnt--) {
++              status = be_cmd_link_status_query(adapter, NULL, &link_status,
++                                                0);
++              if (status) {
++                      test->flags |= ETH_TEST_FL_FAILED;
++                      data[4] = -1;
++                      break;
++              }
++
++              if (link_status)
++                      break;
++
++              msleep_interruptible(500);
+       }
+ }
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index c914b338691b..956fbb164e6f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1489,6 +1489,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+               return 0;
+       }
+ 
++      /* refresh device's multicast list */
++      ibmvnic_set_multi(netdev);
++
+       /* kick napi */
+       for (i = 0; i < adapter->req_rx_queues; i++)
+               napi_schedule(&adapter->napi[i]);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 3c214a47c1c4..1ad345796e80 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4228,7 +4228,7 @@ void e1000e_up(struct e1000_adapter *adapter)
+               e1000_configure_msix(adapter);
+       e1000_irq_enable(adapter);
+ 
+-      netif_start_queue(adapter->netdev);
++      /* Tx queue started by watchdog timer when link is up */
+ 
+       e1000e_trigger_lsc(adapter);
+ }
+@@ -4604,6 +4604,7 @@ int e1000e_open(struct net_device *netdev)
+       pm_runtime_get_sync(&pdev->dev);
+ 
+       netif_carrier_off(netdev);
++      netif_stop_queue(netdev);
+ 
+       /* allocate transmit descriptors */
+       err = e1000e_setup_tx_resources(adapter->tx_ring);
+@@ -4664,7 +4665,6 @@ int e1000e_open(struct net_device *netdev)
+       e1000_irq_enable(adapter);
+ 
+       adapter->tx_hang_recheck = false;
+-      netif_start_queue(netdev);
+ 
+       hw->mac.get_link_status = true;
+       pm_runtime_put(&pdev->dev);
+@@ -5286,6 +5286,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+                       if (phy->ops.cfg_on_link_up)
+                               phy->ops.cfg_on_link_up(hw);
+ 
++                      netif_wake_queue(netdev);
+                       netif_carrier_on(netdev);
+ 
+                       if (!test_bit(__E1000_DOWN, &adapter->state))
+@@ -5299,6 +5300,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+                       /* Link status message must follow this format */
+                       pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+                       netif_carrier_off(netdev);
++                      netif_stop_queue(netdev);
+                       if (!test_bit(__E1000_DOWN, &adapter->state))
+                               mod_timer(&adapter->phy_info_timer,
+                                         round_jiffies(jiffies + 2 * HZ));
+@@ -5306,13 +5308,8 @@ static void e1000_watchdog_task(struct work_struct *work)
+                       /* 8000ES2LAN requires a Rx packet buffer work-around
+                        * on link down event; reset the controller to flush
+                        * the Rx packet buffer.
+-                       *
+-                       * If the link is lost the controller stops DMA, but
+-                       * if there is queued Tx work it cannot be done.  So
+-                       * reset the controller to flush the Tx packet buffers.
+                        */
+-                      if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+-                          e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
++                      if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+                               adapter->flags |= FLAG_RESTART_NOW;
+                       else
+                               pm_schedule_suspend(netdev->dev.parent,
+@@ -5335,6 +5332,14 @@ link_up:
+       adapter->gotc_old = adapter->stats.gotc;
+       spin_unlock(&adapter->stats64_lock);
+ 
++      /* If the link is lost the controller stops DMA, but
++       * if there is queued Tx work it cannot be done.  So
++       * reset the controller to flush the Tx packet buffers.
++       */
++      if (!netif_carrier_ok(netdev) &&
++          (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
++              adapter->flags |= FLAG_RESTART_NOW;
++
+       /* If reset is necessary, do it outside of interrupt context. */
+       if (adapter->flags & FLAG_RESTART_NOW) {
+               schedule_work(&adapter->reset_task);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index 5acfbe5b8b9d..8ab7a4f98a07 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -911,7 +911,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+       MLXSW_REG_ZERO(spaft, payload);
+       mlxsw_reg_spaft_local_port_set(payload, local_port);
+       mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
+-      mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
++      mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged);
+       mlxsw_reg_spaft_allow_tagged_set(payload, true);
+ }
+ 
+diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
+index 40bd88362e3d..693f9582173b 100644
+--- a/drivers/net/ethernet/sis/sis900.c
++++ b/drivers/net/ethernet/sis/sis900.c
+@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+ 
+       /* Enable all known interrupts by setting the interrupt mask. */
+-      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+       sw32(cr, RxENA | sr32(cr));
+       sw32(ier, IE);
+ 
+@@ -1580,7 +1580,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
+       sw32(txdp, sis_priv->tx_ring_dma);
+ 
+       /* Enable all known interrupts by setting the interrupt mask. */
+-      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ }
+ 
+ /**
+@@ -1620,7 +1620,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+                       spin_unlock_irqrestore(&sis_priv->lock, flags);
+                       return NETDEV_TX_OK;
+       }
+-      sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
++      sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
+       sw32(cr, TxENA | sr32(cr));
+ 
+       sis_priv->cur_tx ++;
+@@ -1676,7 +1676,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
+       do {
+               status = sr32(isr);
+ 
+-              if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
++              if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
+                       /* nothing intresting happened */
+                       break;
+               handled = 1;
+@@ -1686,7 +1686,7 @@ static irqreturn_t sis900_interrupt(int irq, void 
*dev_instance)
+                       /* Rx interrupt */
+                       sis900_rx(net_dev);
+ 
+-              if (status & (TxURN | TxERR | TxIDLE))
++              if (status & (TxURN | TxERR | TxIDLE | TxDESC))
+                       /* Tx interrupt */
+                       sis900_finish_xmit(net_dev);
+ 
+@@ -1898,8 +1898,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
+ 
+               if (tx_status & OWN) {
+                       /* The packet is not transmitted yet (owned by hardware) !
+-                       * Note: the interrupt is generated only when Tx Machine
+-                       * is idle, so this is an almost impossible case */
++                       * Note: this is an almost impossible condition
++                       * in case of TxDESC ('descriptor interrupt') */
+                       break;
+               }
+ 
+@@ -2475,7 +2475,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+ 
+       /* Enable all known interrupts by setting the interrupt mask. */
+-      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++      sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+       sw32(cr, RxENA | sr32(cr));
+       sw32(ier, IE);
+ 
+diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
+index 6c7fd98cb00a..d9eda7c217e9 100644
+--- a/drivers/net/ppp/ppp_mppe.c
++++ b/drivers/net/ppp/ppp_mppe.c
+@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcus...@fcusack.com>");
+ MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point 
Encryption support");
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
++MODULE_SOFTDEP("pre: arc4");
+ MODULE_VERSION("1.0.2");
+ 
+ static unsigned int
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 063daa3435e4..4b0144b2a252 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -153,7 +153,7 @@ static bool qmimux_has_slaves(struct usbnet *dev)
+ 
+ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+-      unsigned int len, offset = 0;
++      unsigned int len, offset = 0, pad_len, pkt_len;
+       struct qmimux_hdr *hdr;
+       struct net_device *net;
+       struct sk_buff *skbn;
+@@ -171,10 +171,16 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+               if (hdr->pad & 0x80)
+                       goto skip;
+ 
++              /* extract padding length and check for valid length info */
++              pad_len = hdr->pad & 0x3f;
++              if (len == 0 || pad_len >= len)
++                      goto skip;
++              pkt_len = len - pad_len;
++
+               net = qmimux_find_dev(dev, hdr->mux_id);
+               if (!net)
+                       goto skip;
+-              skbn = netdev_alloc_skb(net, len);
++              skbn = netdev_alloc_skb(net, pkt_len);
+               if (!skbn)
+                       return 0;
+               skbn->dev = net;
+@@ -191,7 +197,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+                       goto skip;
+               }
+ 
+-              skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
++              skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
+               if (netif_rx(skbn) != NET_RX_SUCCESS)
+                       return 0;
+ 
+@@ -241,13 +247,14 @@ out_free_newdev:
+       return err;
+ }
+ 
+-static void qmimux_unregister_device(struct net_device *dev)
++static void qmimux_unregister_device(struct net_device *dev,
++                                   struct list_head *head)
+ {
+       struct qmimux_priv *priv = netdev_priv(dev);
+       struct net_device *real_dev = priv->real_dev;
+ 
+       netdev_upper_dev_unlink(real_dev, dev);
+-      unregister_netdevice(dev);
++      unregister_netdevice_queue(dev, head);
+ 
+       /* Get rid of the reference to real_dev */
+       dev_put(real_dev);
+@@ -356,8 +363,8 @@ static ssize_t add_mux_store(struct device *d,  struct device_attribute *attr, c
+       if (kstrtou8(buf, 0, &mux_id))
+               return -EINVAL;
+ 
+-      /* mux_id [1 - 0x7f] range empirically found */
+-      if (mux_id < 1 || mux_id > 0x7f)
++      /* mux_id [1 - 254] for compatibility with ip(8) and the rmnet driver */
++      if (mux_id < 1 || mux_id > 254)
+               return -EINVAL;
+ 
+       if (!rtnl_trylock())
+@@ -418,7 +425,7 @@ static ssize_t del_mux_store(struct device *d,  struct device_attribute *attr, c
+               ret = -EINVAL;
+               goto err;
+       }
+-      qmimux_unregister_device(del_dev);
++      qmimux_unregister_device(del_dev, NULL);
+ 
+       if (!qmimux_has_slaves(dev))
+               info->flags &= ~QMI_WWAN_FLAG_MUX;
+@@ -1417,6 +1424,7 @@ static void qmi_wwan_disconnect(struct usb_interface *intf)
+       struct qmi_wwan_state *info;
+       struct list_head *iter;
+       struct net_device *ldev;
++      LIST_HEAD(list);
+ 
+       /* called twice if separate control and data intf */
+       if (!dev)
+@@ -1429,8 +1437,9 @@ static void qmi_wwan_disconnect(struct usb_interface *intf)
+               }
+               rcu_read_lock();
+               netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
+-                      qmimux_unregister_device(ldev);
++                      qmimux_unregister_device(ldev, &list);
+               rcu_read_unlock();
++              unregister_netdevice_many(&list);
+               rtnl_unlock();
+               info->flags &= ~QMI_WWAN_FLAG_MUX;
+       }
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index e7c3f3b8457d..99f1897a775d 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -128,6 +128,8 @@ static const struct usb_device_id carl9170_usb_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+ 
++static struct usb_driver carl9170_driver;
++
+ static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
+ {
+       struct urb *urb;
+@@ -966,32 +968,28 @@ err_out:
+ 
+ static void carl9170_usb_firmware_failed(struct ar9170 *ar)
+ {
+-      struct device *parent = ar->udev->dev.parent;
+-      struct usb_device *udev;
+-
+-      /*
+-       * Store a copy of the usb_device pointer locally.
+-       * This is because device_release_driver initiates
+-       * carl9170_usb_disconnect, which in turn frees our
+-       * driver context (ar).
++      /* Store a copies of the usb_interface and usb_device pointer locally.
++       * This is because release_driver initiates carl9170_usb_disconnect,
++       * which in turn frees our driver context (ar).
+        */
+-      udev = ar->udev;
++      struct usb_interface *intf = ar->intf;
++      struct usb_device *udev = ar->udev;
+ 
+       complete(&ar->fw_load_wait);
++      /* at this point 'ar' could be already freed. Don't use it anymore */
++      ar = NULL;
+ 
+       /* unbind anything failed */
+-      if (parent)
+-              device_lock(parent);
+-
+-      device_release_driver(&udev->dev);
+-      if (parent)
+-              device_unlock(parent);
++      usb_lock_device(udev);
++      usb_driver_release_interface(&carl9170_driver, intf);
++      usb_unlock_device(udev);
+ 
+-      usb_put_dev(udev);
++      usb_put_intf(intf);
+ }
+ 
+ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+ {
++      struct usb_interface *intf = ar->intf;
+       int err;
+ 
+       err = carl9170_parse_firmware(ar);
+@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+               goto err_unrx;
+ 
+       complete(&ar->fw_load_wait);
+-      usb_put_dev(ar->udev);
++      usb_put_intf(intf);
+       return;
+ 
+ err_unrx:
+@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+               return PTR_ERR(ar);
+ 
+       udev = interface_to_usbdev(intf);
+-      usb_get_dev(udev);
+       ar->udev = udev;
+       ar->intf = intf;
+       ar->features = id->driver_info;
+@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+       atomic_set(&ar->rx_anch_urbs, 0);
+       atomic_set(&ar->rx_pool_urbs, 0);
+ 
+-      usb_get_dev(ar->udev);
++      usb_get_intf(intf);
+ 
+       carl9170_set_state(ar, CARL9170_STOPPED);
+ 
+       err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+               &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+       if (err) {
+-              usb_put_dev(udev);
+-              usb_put_dev(udev);
++              usb_put_intf(intf);
+               carl9170_free(ar);
+       }
+       return err;
+@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
+ 
+       carl9170_release_firmware(ar);
+       carl9170_free(ar);
+-      usb_put_dev(udev);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 99676d6c4713..6c10b8c4ddbe 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1509,7 +1509,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+       goto free;
+ 
+  out_free_fw:
+-      iwl_dealloc_ucode(drv);
+       release_firmware(ucode_raw);
+  out_unbind:
+       complete(&drv->request_firmware_complete);
+diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
+index b0b86f701061..15661da6eedc 100644
+--- a/drivers/net/wireless/intersil/p54/p54usb.c
++++ b/drivers/net/wireless/intersil/p54/p54usb.c
+@@ -33,6 +33,8 @@ MODULE_ALIAS("prism54usb");
+ MODULE_FIRMWARE("isl3886usb");
+ MODULE_FIRMWARE("isl3887usb");
+ 
++static struct usb_driver p54u_driver;
++
+ /*
+  * Note:
+  *
+@@ -921,9 +923,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+ {
+       struct p54u_priv *priv = context;
+       struct usb_device *udev = priv->udev;
++      struct usb_interface *intf = priv->intf;
+       int err;
+ 
+-      complete(&priv->fw_wait_load);
+       if (firmware) {
+               priv->fw = firmware;
+               err = p54u_start_ops(priv);
+@@ -932,26 +934,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+               dev_err(&udev->dev, "Firmware not found.\n");
+       }
+ 
+-      if (err) {
+-              struct device *parent = priv->udev->dev.parent;
+-
+-              dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
+-
+-              if (parent)
+-                      device_lock(parent);
++      complete(&priv->fw_wait_load);
++      /*
++       * At this point p54u_disconnect may have already freed
++       * the "priv" context. Do not use it anymore!
++       */
++      priv = NULL;
+ 
+-              device_release_driver(&udev->dev);
+-              /*
+-               * At this point p54u_disconnect has already freed
+-               * the "priv" context. Do not use it anymore!
+-               */
+-              priv = NULL;
++      if (err) {
++              dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
+ 
+-              if (parent)
+-                      device_unlock(parent);
++              usb_lock_device(udev);
++              usb_driver_release_interface(&p54u_driver, intf);
++              usb_unlock_device(udev);
+       }
+ 
+-      usb_put_dev(udev);
++      usb_put_intf(intf);
+ }
+ 
+ static int p54u_load_firmware(struct ieee80211_hw *dev,
+@@ -972,14 +970,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
+       dev_info(&priv->udev->dev, "Loading firmware file %s\n",
+              p54u_fwlist[i].fw);
+ 
+-      usb_get_dev(udev);
++      usb_get_intf(intf);
+       err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
+                                     device, GFP_KERNEL, priv,
+                                     p54u_load_firmware_cb);
+       if (err) {
+               dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
+                                         "(%d)!\n", p54u_fwlist[i].fw, err);
+-              usb_put_dev(udev);
++              usb_put_intf(intf);
+       }
+ 
+       return err;
+@@ -1011,8 +1009,6 @@ static int p54u_probe(struct usb_interface *intf,
+       skb_queue_head_init(&priv->rx_queue);
+       init_usb_anchor(&priv->submitted);
+ 
+-      usb_get_dev(udev);
+-
+       /* really lazy and simple way of figuring out if we're a 3887 */
+       /* TODO: should just stick the identification in the device table */
+       i = intf->altsetting->desc.bNumEndpoints;
+@@ -1053,10 +1049,8 @@ static int p54u_probe(struct usb_interface *intf,
+               priv->upload_fw = p54u_upload_firmware_net2280;
+       }
+       err = p54u_load_firmware(dev, intf);
+-      if (err) {
+-              usb_put_dev(udev);
++      if (err)
+               p54_free_common(dev);
+-      }
+       return err;
+ }
+ 
+@@ -1072,7 +1066,6 @@ static void p54u_disconnect(struct usb_interface *intf)
+       wait_for_completion(&priv->fw_wait_load);
+       p54_unregister_common(dev);
+ 
+-      usb_put_dev(interface_to_usbdev(intf));
+       release_firmware(priv->fw);
+       p54_free_common(dev);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 9e75522d248a..342555ebafd7 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -1744,9 +1744,10 @@ struct mwifiex_ie_types_wmm_queue_status {
+ struct ieee_types_vendor_header {
+       u8 element_id;
+       u8 len;
+-      u8 oui[4];      /* 0~2: oui, 3: oui_type */
+-      u8 oui_subtype;
+-      u8 version;
++      struct {
++              u8 oui[3];
++              u8 oui_type;
++      } __packed oui;
+ } __packed;
+ 
+ struct ieee_types_wmm_parameter {
+@@ -1760,6 +1761,9 @@ struct ieee_types_wmm_parameter {
+        *   Version     [1]
+        */
+       struct ieee_types_vendor_header vend_hdr;
++      u8 oui_subtype;
++      u8 version;
++
+       u8 qos_info_bitmap;
+       u8 reserved;
+       struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
+@@ -1777,6 +1781,8 @@ struct ieee_types_wmm_info {
+        *   Version     [1]
+        */
+       struct ieee_types_vendor_header vend_hdr;
++      u8 oui_subtype;
++      u8 version;
+ 
+       u8 qos_info_bitmap;
+ } __packed;
+diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
+index 922e3d69fd84..32853496fe8c 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ie.c
++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
+@@ -329,6 +329,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+       struct ieee80211_vendor_ie *vendorhdr;
+       u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
+       int left_len, parsed_len = 0;
++      unsigned int token_len;
++      int err = 0;
+ 
+       if (!info->tail || !info->tail_len)
+               return 0;
+@@ -344,6 +346,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+        */
+       while (left_len > sizeof(struct ieee_types_header)) {
+               hdr = (void *)(info->tail + parsed_len);
++              token_len = hdr->len + sizeof(struct ieee_types_header);
++              if (token_len > left_len) {
++                      err = -EINVAL;
++                      goto out;
++              }
++
+               switch (hdr->element_id) {
+               case WLAN_EID_SSID:
+               case WLAN_EID_SUPP_RATES:
+@@ -357,13 +365,16 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+               case WLAN_EID_VENDOR_SPECIFIC:
+                       break;
+               default:
+-                      memcpy(gen_ie->ie_buffer + ie_len, hdr,
+-                             hdr->len + sizeof(struct ieee_types_header));
+-                      ie_len += hdr->len + sizeof(struct ieee_types_header);
++                      if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
++                              err = -EINVAL;
++                              goto out;
++                      }
++                      memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
++                      ie_len += token_len;
+                       break;
+               }
+-              left_len -= hdr->len + sizeof(struct ieee_types_header);
+-              parsed_len += hdr->len + sizeof(struct ieee_types_header);
++              left_len -= token_len;
++              parsed_len += token_len;
+       }
+ 
+       /* parse only WPA vendor IE from tail, WMM IE is configured by
+@@ -373,15 +384,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+                                                   WLAN_OUI_TYPE_MICROSOFT_WPA,
+                                                   info->tail, info->tail_len);
+       if (vendorhdr) {
+-              memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
+-                     vendorhdr->len + sizeof(struct ieee_types_header));
+-              ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
++              token_len = vendorhdr->len + sizeof(struct ieee_types_header);
++              if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
++                      err = -EINVAL;
++                      goto out;
++              }
++              memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
++              ie_len += token_len;
+       }
+ 
+-      if (!ie_len) {
+-              kfree(gen_ie);
+-              return 0;
+-      }
++      if (!ie_len)
++              goto out;
+ 
+       gen_ie->ie_index = cpu_to_le16(gen_idx);
+       gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
+@@ -391,13 +404,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+ 
+       if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
+                                        NULL, NULL)) {
+-              kfree(gen_ie);
+-              return -1;
++              err = -EINVAL;
++              goto out;
+       }
+ 
+       priv->gen_idx = gen_idx;
++
++ out:
+       kfree(gen_ie);
+-      return 0;
++      return err;
+ }
+ 
+ /* This function parses different IEs-head & tail IEs, beacon IEs,
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index c9d41ed77fc7..29284f9a0646 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1244,6 +1244,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+               }
+               switch (element_id) {
+               case WLAN_EID_SSID:
++                      if (element_len > IEEE80211_MAX_SSID_LEN)
++                              return -EINVAL;
+                       bss_entry->ssid.ssid_len = element_len;
+                       memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
+                              element_len);
+@@ -1253,6 +1255,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_SUPP_RATES:
++                      if (element_len > MWIFIEX_SUPPORTED_RATES)
++                              return -EINVAL;
+                       memcpy(bss_entry->data_rates, current_ptr + 2,
+                              element_len);
+                       memcpy(bss_entry->supported_rates, current_ptr + 2,
+@@ -1262,6 +1266,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_FH_PARAMS:
++                      if (element_len + 2 < sizeof(*fh_param_set))
++                              return -EINVAL;
+                       fh_param_set =
+                               (struct ieee_types_fh_param_set *) current_ptr;
+                       memcpy(&bss_entry->phy_param_set.fh_param_set,
+@@ -1270,6 +1276,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_DS_PARAMS:
++                      if (element_len + 2 < sizeof(*ds_param_set))
++                              return -EINVAL;
+                       ds_param_set =
+                               (struct ieee_types_ds_param_set *) current_ptr;
+ 
+@@ -1281,6 +1289,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_CF_PARAMS:
++                      if (element_len + 2 < sizeof(*cf_param_set))
++                              return -EINVAL;
+                       cf_param_set =
+                               (struct ieee_types_cf_param_set *) current_ptr;
+                       memcpy(&bss_entry->ss_param_set.cf_param_set,
+@@ -1289,6 +1299,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_IBSS_PARAMS:
++                      if (element_len + 2 < sizeof(*ibss_param_set))
++                              return -EINVAL;
+                       ibss_param_set =
+                               (struct ieee_types_ibss_param_set *)
+                               current_ptr;
+@@ -1298,10 +1310,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       break;
+ 
+               case WLAN_EID_ERP_INFO:
++                      if (!element_len)
++                              return -EINVAL;
+                       bss_entry->erp_flags = *(current_ptr + 2);
+                       break;
+ 
+               case WLAN_EID_PWR_CONSTRAINT:
++                      if (!element_len)
++                              return -EINVAL;
+                       bss_entry->local_constraint = *(current_ptr + 2);
+                       bss_entry->sensed_11h = true;
+                       break;
+@@ -1344,15 +1360,22 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+                       vendor_ie = (struct ieee_types_vendor_specific *)
+                                       current_ptr;
+ 
+-                      if (!memcmp
+-                          (vendor_ie->vend_hdr.oui, wpa_oui,
+-                           sizeof(wpa_oui))) {
++                      /* 802.11 requires at least 3-byte OUI. */
++                      if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
++                              return -EINVAL;
++
++                      /* Not long enough for a match? Skip it. */
++                      if (element_len < sizeof(wpa_oui))
++                              break;
++
++                      if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
++                                  sizeof(wpa_oui))) {
+                               bss_entry->bcn_wpa_ie =
+                                       (struct ieee_types_vendor_specific *)
+                                       current_ptr;
+                               bss_entry->wpa_offset = (u16)
+                                       (current_ptr - bss_entry->beacon_buf);
+-                      } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
++                      } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
+                                   sizeof(wmm_oui))) {
+                               if (total_ie_len ==
+                                   sizeof(struct ieee_types_wmm_parameter) ||
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index a6077ab3efc3..82828a207963 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -1388,7 +1388,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+                       /* Test to see if it is a WPA IE, if not, then
+                        * it is a gen IE
+                        */
+-                      if (!memcmp(pvendor_ie->oui, wpa_oui,
++                      if (!memcmp(&pvendor_ie->oui, wpa_oui,
+                                   sizeof(wpa_oui))) {
+                               /* IE is a WPA/WPA2 IE so call set_wpa function
+                                */
+@@ -1398,7 +1398,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+                               goto next_ie;
+                       }
+ 
+-                      if (!memcmp(pvendor_ie->oui, wps_oui,
++                      if (!memcmp(&pvendor_ie->oui, wps_oui,
+                                   sizeof(wps_oui))) {
+                               /* Test to see if it is a WPS IE,
+                                * if so, enable wps session flag
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index 0edd26881321..7fba4d940131 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: WMM Parameter IE: version=%d,\t"
+                   "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+-                  wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
++                  wmm_ie->version, wmm_ie->qos_info_bitmap &
+                   IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+                   wmm_ie->reserved);
+ 
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 35286907c636..d0090c5c88e7 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+                       return -ENOMEM;
+               }
+               irq_ptr_qs[i] = q;
++              INIT_LIST_HEAD(&q->entry);
+       }
+       return 0;
+ }
+@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+       q->mask = 1 << (31 - i);
+       q->nr = i;
+       q->handler = handler;
++      INIT_LIST_HEAD(&q->entry);
+ }
+ 
+ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index a739bdf9630e..831a3a0a2837 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -83,7 +83,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+       mutex_lock(&tiq_list_lock);
+       list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+       mutex_unlock(&tiq_list_lock);
+-      xchg(irq_ptr->dsci, 1 << 7);
+ }
+ 
+ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+@@ -91,14 +90,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+       struct qdio_q *q;
+ 
+       q = irq_ptr->input_qs[0];
+-      /* if establish triggered an error */
+-      if (!q || !q->entry.prev || !q->entry.next)
++      if (!q)
+               return;
+ 
+       mutex_lock(&tiq_list_lock);
+       list_del_rcu(&q->entry);
+       mutex_unlock(&tiq_list_lock);
+       synchronize_rcu();
++      INIT_LIST_HEAD(&q->entry);
+ }
+ 
+ static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
+index 48c7890c3007..2b0b757dc626 100644
+--- a/drivers/staging/comedi/drivers/amplc_pci230.c
++++ b/drivers/staging/comedi/drivers/amplc_pci230.c
+@@ -2339,7 +2339,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
+       devpriv->intr_running = false;
+       spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
+ 
+-      comedi_handle_events(dev, s_ao);
++      if (s_ao)
++              comedi_handle_events(dev, s_ao);
+       comedi_handle_events(dev, s_ai);
+ 
+       return IRQ_HANDLED;
+diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
+index d5295bbdd28c..37133d54dda1 100644
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -566,7 +566,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
+       }
+ #endif
+       comedi_handle_events(dev, s);
+-      comedi_handle_events(dev, s_ao);
++      if (s_ao)
++              comedi_handle_events(dev, s_ao);
+ 
+       return IRQ_RETVAL(handled);
+ }
+diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
+index a6f249e9c1e1..4d218d554878 100644
+--- a/drivers/staging/iio/cdc/ad7150.c
++++ b/drivers/staging/iio/cdc/ad7150.c
+@@ -6,6 +6,7 @@
+  * Licensed under the GPL-2 or later.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/interrupt.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+@@ -129,7 +130,7 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
+ {
+       int ret;
+       u8 threshtype;
+-      bool adaptive;
++      bool thrfixed;
+       struct ad7150_chip_info *chip = iio_priv(indio_dev);
+ 
+       ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
+@@ -137,21 +138,23 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
+               return ret;
+ 
+       threshtype = (ret >> 5) & 0x03;
+-      adaptive = !!(ret & 0x80);
++
++      /* check if threshold mode is fixed or adaptive */
++      thrfixed = FIELD_GET(AD7150_CFG_FIX, ret);
+ 
+       switch (type) {
+       case IIO_EV_TYPE_MAG_ADAPTIVE:
+               if (dir == IIO_EV_DIR_RISING)
+-                      return adaptive && (threshtype == 0x1);
+-              return adaptive && (threshtype == 0x0);
++                      return !thrfixed && (threshtype == 0x1);
++              return !thrfixed && (threshtype == 0x0);
+       case IIO_EV_TYPE_THRESH_ADAPTIVE:
+               if (dir == IIO_EV_DIR_RISING)
+-                      return adaptive && (threshtype == 0x3);
+-              return adaptive && (threshtype == 0x2);
++                      return !thrfixed && (threshtype == 0x3);
++              return !thrfixed && (threshtype == 0x2);
+       case IIO_EV_TYPE_THRESH:
+               if (dir == IIO_EV_DIR_RISING)
+-                      return !adaptive && (threshtype == 0x1);
+-              return !adaptive && (threshtype == 0x0);
++                      return thrfixed && (threshtype == 0x1);
++              return thrfixed && (threshtype == 0x0);
+       default:
+               break;
+       }
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index ecf3d631bc09..ab0796d14ac1 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1873,8 +1873,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 
+       status = serial_port_in(port, UART_LSR);
+ 
+-      if (status & (UART_LSR_DR | UART_LSR_BI) &&
+-          iir & UART_IIR_RDI) {
++      if (status & (UART_LSR_DR | UART_LSR_BI)) {
+               if (!up->dma || handle_rx_dma(up, iir))
+                       status = serial8250_rx_chars(up, status);
+       }
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 3a0e4f5d7b83..81d84e0c3c6c 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -190,11 +190,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+               out = dev->port_usb->out_ep;
+       else
+               out = NULL;
+-      spin_unlock_irqrestore(&dev->lock, flags);
+ 
+       if (!out)
++      {
++              spin_unlock_irqrestore(&dev->lock, flags);
+               return -ENOTCONN;
+-
++      }
+ 
+       /* Padding up to RX_EXTRA handles minor disagreements with host.
+        * Normally we use the USB "terminate on short read" convention;
+@@ -218,6 +219,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+ 
+       if (dev->port_usb->is_fixed)
+               size = max_t(size_t, size, dev->port_usb->fixed_out_len);
++      spin_unlock_irqrestore(&dev->lock, flags);
+ 
+       skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
+       if (skb == NULL) {
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 5d369b38868a..b6d9308d43ba 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -818,9 +818,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+ }
+ 
+ static void usbhsf_dma_complete(void *arg);
+-static void xfer_work(struct work_struct *work)
++static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ {
+-      struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+       struct usbhs_pipe *pipe = pkt->pipe;
+       struct usbhs_fifo *fifo;
+       struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+@@ -828,12 +827,10 @@ static void xfer_work(struct work_struct *work)
+       struct dma_chan *chan;
+       struct device *dev = usbhs_priv_to_dev(priv);
+       enum dma_transfer_direction dir;
+-      unsigned long flags;
+ 
+-      usbhs_lock(priv, flags);
+       fifo = usbhs_pipe_to_fifo(pipe);
+       if (!fifo)
+-              goto xfer_work_end;
++              return;
+ 
+       chan = usbhsf_dma_chan_get(fifo, pkt);
+       dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+@@ -842,7 +839,7 @@ static void xfer_work(struct work_struct *work)
+                                       pkt->trans, dir,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+-              goto xfer_work_end;
++              return;
+ 
+       desc->callback          = usbhsf_dma_complete;
+       desc->callback_param    = pipe;
+@@ -850,7 +847,7 @@ static void xfer_work(struct work_struct *work)
+       pkt->cookie = dmaengine_submit(desc);
+       if (pkt->cookie < 0) {
+               dev_err(dev, "Failed to submit dma descriptor\n");
+-              goto xfer_work_end;
++              return;
+       }
+ 
+       dev_dbg(dev, "  %s %d (%d/ %d)\n",
+@@ -861,8 +858,17 @@ static void xfer_work(struct work_struct *work)
+       dma_async_issue_pending(chan);
+       usbhsf_dma_start(pipe, fifo);
+       usbhs_pipe_enable(pipe);
++}
++
++static void xfer_work(struct work_struct *work)
++{
++      struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
++      struct usbhs_pipe *pipe = pkt->pipe;
++      struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++      unsigned long flags;
+ 
+-xfer_work_end:
++      usbhs_lock(priv, flags);
++      usbhsf_dma_xfer_preparing(pkt);
+       usbhs_unlock(priv, flags);
+ }
+ 
+@@ -915,8 +921,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+       pkt->trans = len;
+ 
+       usbhsf_tx_irq_ctrl(pipe, 0);
+-      INIT_WORK(&pkt->work, xfer_work);
+-      schedule_work(&pkt->work);
++      /* FIXME: Workaround for usb dmac so that the driver can be used in atomic context */
++      if (usbhs_get_dparam(priv, has_usb_dmac)) {
++              usbhsf_dma_xfer_preparing(pkt);
++      } else {
++              INIT_WORK(&pkt->work, xfer_work);
++              schedule_work(&pkt->work);
++      }
+ 
+       return 0;
+ 
+@@ -1022,8 +1033,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
+ 
+       pkt->trans = pkt->length;
+ 
+-      INIT_WORK(&pkt->work, xfer_work);
+-      schedule_work(&pkt->work);
++      usbhsf_dma_xfer_preparing(pkt);
+ 
+       return 0;
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e76395d7f17d..d2349c094767 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1024,6 +1024,7 @@ static const struct usb_device_id id_table_combined[] = {
+       { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+       /* EZPrototypes devices */
+       { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
++      { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+       { }                                     /* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5755f0df0025..f12d806220b4 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1543,3 +1543,9 @@
+ #define CHETCO_SEASMART_DISPLAY_PID   0xA5AD /* SeaSmart NMEA2000 Display */
+ #define CHETCO_SEASMART_LITE_PID      0xA5AE /* SeaSmart Lite USB Adapter */
+ #define CHETCO_SEASMART_ANALOG_PID    0xA5AF /* SeaSmart Analog Adapter */
++
++/*
++ * Unjo AB
++ */
++#define UNJO_VID                      0x22B7
++#define UNJO_ISODEBUG_V1_PID          0x150D
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 3c8e4970876c..8b9e12ab1fe6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1346,6 +1346,7 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
++      { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) },    /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */
+       { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },    /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(4) },
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index a120649beeca..d13a154c8424 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
+       if (ret == -ENODATA) {
+               if (!S_ISDIR(inode->i_mode))
+                       ret = -ENOTDIR;
++              else if (IS_DEADDIR(inode))
++                      ret = -ENOENT;
+               else if (!inode->i_sb->s_cop->empty_dir(inode))
+                       ret = -ENOTEMPTY;
+               else
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 4cd0c2336624..9c81fd973418 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1989,8 +1989,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+                                      &warn_to[cnt]);
+               if (ret)
+                       goto over_quota;
+-              ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0,
+-                                    &warn_to[cnt]);
++              ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
++                                    DQUOT_SPACE_WARN, &warn_to[cnt]);
+               if (ret) {
+                       spin_lock(&transfer_to[cnt]->dq_dqb_lock);
+                       dquot_decr_inodes(transfer_to[cnt], inode_usage);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 28b9d7cca29b..3c1b54091d6c 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -470,13 +470,15 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
+       return NULL;
+ }
+ 
+-/* Extend the file by 'blocks' blocks, return the number of extents added */
++/* Extend the file with new blocks totaling 'new_block_bytes',
++ * return the number of extents added
++ */
+ static int udf_do_extend_file(struct inode *inode,
+                             struct extent_position *last_pos,
+                             struct kernel_long_ad *last_ext,
+-                            sector_t blocks)
++                            loff_t new_block_bytes)
+ {
+-      sector_t add;
++      uint32_t add;
+       int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+       struct super_block *sb = inode->i_sb;
+       struct kernel_lb_addr prealloc_loc = {};
+@@ -486,7 +488,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 
+       /* The previous extent is fake and we should not extend by anything
+        * - there's nothing to do... */
+-      if (!blocks && fake)
++      if (!new_block_bytes && fake)
+               return 0;
+ 
+       iinfo = UDF_I(inode);
+@@ -517,13 +519,12 @@ static int udf_do_extend_file(struct inode *inode,
+       /* Can we merge with the previous extent? */
+       if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+                                       EXT_NOT_RECORDED_NOT_ALLOCATED) {
+-              add = ((1 << 30) - sb->s_blocksize -
+-                      (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
+-                      sb->s_blocksize_bits;
+-              if (add > blocks)
+-                      add = blocks;
+-              blocks -= add;
+-              last_ext->extLength += add << sb->s_blocksize_bits;
++              add = (1 << 30) - sb->s_blocksize -
++                      (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
++              if (add > new_block_bytes)
++                      add = new_block_bytes;
++              new_block_bytes -= add;
++              last_ext->extLength += add;
+       }
+ 
+       if (fake) {
+@@ -544,28 +545,27 @@ static int udf_do_extend_file(struct inode *inode,
+       }
+ 
+       /* Managed to do everything necessary? */
+-      if (!blocks)
++      if (!new_block_bytes)
+               goto out;
+ 
+       /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
+       last_ext->extLocation.logicalBlockNum = 0;
+       last_ext->extLocation.partitionReferenceNum = 0;
+-      add = (1 << (30-sb->s_blocksize_bits)) - 1;
+-      last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+-                              (add << sb->s_blocksize_bits);
++      add = (1 << 30) - sb->s_blocksize;
++      last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
+ 
+       /* Create enough extents to cover the whole hole */
+-      while (blocks > add) {
+-              blocks -= add;
++      while (new_block_bytes > add) {
++              new_block_bytes -= add;
+               err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+                                  last_ext->extLength, 1);
+               if (err)
+                       return err;
+               count++;
+       }
+-      if (blocks) {
++      if (new_block_bytes) {
+               last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+-                      (blocks << sb->s_blocksize_bits);
++                      new_block_bytes;
+               err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+                                  last_ext->extLength, 1);
+               if (err)
+@@ -596,6 +596,24 @@ out:
+       return count;
+ }
+ 
++/* Extend the final block of the file to final_block_len bytes */
++static void udf_do_extend_final_block(struct inode *inode,
++                                    struct extent_position *last_pos,
++                                    struct kernel_long_ad *last_ext,
++                                    uint32_t final_block_len)
++{
++      struct super_block *sb = inode->i_sb;
++      uint32_t added_bytes;
++
++      added_bytes = final_block_len -
++                    (last_ext->extLength & (sb->s_blocksize - 1));
++      last_ext->extLength += added_bytes;
++      UDF_I(inode)->i_lenExtents += added_bytes;
++
++      udf_write_aext(inode, last_pos, &last_ext->extLocation,
++                      last_ext->extLength, 1);
++}
++
+ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ {
+ 
+@@ -605,10 +623,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+       int8_t etype;
+       struct super_block *sb = inode->i_sb;
+       sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
++      unsigned long partial_final_block;
+       int adsize;
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       struct kernel_long_ad extent;
+-      int err;
++      int err = 0;
++      int within_final_block;
+ 
+       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+               adsize = sizeof(struct short_ad);
+@@ -618,18 +638,8 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+               BUG();
+ 
+       etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
++      within_final_block = (etype != -1);
+ 
+-      /* File has extent covering the new size (could happen when extending
+-       * inside a block)? */
+-      if (etype != -1)
+-              return 0;
+-      if (newsize & (sb->s_blocksize - 1))
+-              offset++;
+-      /* Extended file just to the boundary of the last file block? */
+-      if (offset == 0)
+-              return 0;
+-
+-      /* Truncate is extending the file by 'offset' blocks */
+       if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+           (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+               /* File has no extents at all or has empty last
+@@ -643,7 +653,22 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+                                     &extent.extLength, 0);
+               extent.extLength |= etype << 30;
+       }
+-      err = udf_do_extend_file(inode, &epos, &extent, offset);
++
++      partial_final_block = newsize & (sb->s_blocksize - 1);
++
++      /* File has extent covering the new size (could happen when extending
++       * inside a block)?
++       */
++      if (within_final_block) {
++              /* Extending file within the last file block */
++              udf_do_extend_final_block(inode, &epos, &extent,
++                                        partial_final_block);
++      } else {
++              loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
++                           partial_final_block;
++              err = udf_do_extend_file(inode, &epos, &extent, add);
++      }
++
+       if (err < 0)
+               goto out;
+       err = 0;
+@@ -745,6 +770,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+       /* Are we beyond EOF? */
+       if (etype == -1) {
+               int ret;
++              loff_t hole_len;
+               isBeyondEOF = true;
+               if (count) {
+                       if (c)
+@@ -760,7 +786,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+                       startnum = (offset > 0);
+               }
+               /* Create extents for the hole between EOF and offset */
+-              ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
++              hole_len = (loff_t)offset << inode->i_blkbits;
++              ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
+               if (ret < 0) {
+                       *err = ret;
+                       newblock = 0;
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 6376e2dcb0b7..0c78ad0cc515 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -163,6 +163,7 @@ enum cpuhp_state {
+       CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+       CPUHP_AP_WORKQUEUE_ONLINE,
+       CPUHP_AP_RCUTREE_ONLINE,
++      CPUHP_AP_BASE_CACHEINFO_ONLINE,
+       CPUHP_AP_ONLINE_DYN,
+       CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,
+       CPUHP_AP_X86_HPET_ONLINE,
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 1c5469adaa85..bb7baecef002 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -101,7 +101,8 @@
+ #define DIV_ROUND_DOWN_ULL(ll, d) \
+       ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+ 
+-#define DIV_ROUND_UP_ULL(ll, d)               DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
++#define DIV_ROUND_UP_ULL(ll, d) \
++      DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
+ 
+ #if BITS_PER_LONG == 32
+ # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
+index b724ef7005de..53c5e40a2a8f 100644
+--- a/include/linux/vmw_vmci_defs.h
++++ b/include/linux/vmw_vmci_defs.h
+@@ -68,9 +68,18 @@ enum {
+ 
+ /*
+  * A single VMCI device has an upper limit of 128MB on the amount of
+- * memory that can be used for queue pairs.
++ * memory that can be used for queue pairs. Since each queue pair
++ * consists of at least two pages, the memory limit also dictates the
++ * number of queue pairs a guest can create.
+  */
+ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
++#define VMCI_MAX_GUEST_QP_COUNT  (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
++
++/*
++ * There can be at most PAGE_SIZE doorbells since there is one doorbell
++ * per byte in the doorbell bitmap page.
++ */
++#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
+ 
+ /*
+  * Queues with pre-mapped data pages must be small, so that we don't pin
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index d66f70f63734..3b0e3cdee1c3 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -152,9 +152,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+       memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+       pkt_len = skb->len - skb_inner_network_offset(skb);
+       err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
+-      if (unlikely(net_xmit_eval(err)))
+-              pkt_len = -1;
+-      iptunnel_xmit_stats(dev, pkt_len);
++
++      if (dev) {
++              if (unlikely(net_xmit_eval(err)))
++                      pkt_len = -1;
++              iptunnel_xmit_stats(dev, pkt_len);
++      }
+ }
+ #endif
+ #endif
+diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
+index a7e66ab11d1d..c23f91ae5fe8 100644
+--- a/include/uapi/linux/nilfs2_ondisk.h
++++ b/include/uapi/linux/nilfs2_ondisk.h
+@@ -29,7 +29,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/magic.h>
+-
++#include <asm/byteorder.h>
+ 
+ #define NILFS_INODE_BMAP_SIZE 7
+ 
+@@ -533,19 +533,19 @@ enum {
+ static inline void                                                    \
+ nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp)              \
+ {                                                                     \
+-      cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) |          \
+-                                 (1UL << NILFS_CHECKPOINT_##flag));   \
++      cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) |      \
++                                   (1UL << NILFS_CHECKPOINT_##flag)); \
+ }                                                                     \
+ static inline void                                                    \
+ nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp)            \
+ {                                                                     \
+-      cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) &          \
++      cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) &      \
+                                  ~(1UL << NILFS_CHECKPOINT_##flag));  \
+ }                                                                     \
+ static inline int                                                     \
+ nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp)            \
+ {                                                                     \
+-      return !!(le32_to_cpu(cp->cp_flags) &                           \
++      return !!(__le32_to_cpu(cp->cp_flags) &                         \
+                 (1UL << NILFS_CHECKPOINT_##flag));                    \
+ }
+ 
+@@ -595,20 +595,20 @@ enum {
+ static inline void                                                    \
+ nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su)                \
+ {                                                                     \
+-      su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) |          \
++      su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) |      \
+                                  (1UL << NILFS_SEGMENT_USAGE_##flag));\
+ }                                                                     \
+ static inline void                                                    \
+ nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su)      \
+ {                                                                     \
+       su->su_flags =                                                  \
+-              cpu_to_le32(le32_to_cpu(su->su_flags) &                 \
++              __cpu_to_le32(__le32_to_cpu(su->su_flags) &             \
+                           ~(1UL << NILFS_SEGMENT_USAGE_##flag));      \
+ }                                                                     \
+ static inline int                                                     \
+ nilfs_segment_usage_##name(const struct nilfs_segment_usage *su)      \
+ {                                                                     \
+-      return !!(le32_to_cpu(su->su_flags) &                           \
++      return !!(__le32_to_cpu(su->su_flags) &                         \
+                 (1UL << NILFS_SEGMENT_USAGE_##flag));                 \
+ }
+ 
+@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+ static inline void
+ nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+ {
+-      su->su_lastmod = cpu_to_le64(0);
+-      su->su_nblocks = cpu_to_le32(0);
+-      su->su_flags = cpu_to_le32(0);
++      su->su_lastmod = __cpu_to_le64(0);
++      su->su_nblocks = __cpu_to_le32(0);
++      su->su_flags = __cpu_to_le32(0);
+ }
+ 
+ static inline int
+ nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+ {
+-      return !le32_to_cpu(su->su_flags);
++      return !__le32_to_cpu(su->su_flags);
+ }
+ 
+ /**
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index f370a0f43005..d768e15bef83 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1944,6 +1944,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
+       if (ret)
+               return ret;
+ 
++      if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
++              return -EINVAL;
++
+       /*
+        * Cannot fail STARTING/DYING callbacks.
+        */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 580616e6fcee..3d4eb6f840eb 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5630,7 +5630,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
+       if (user_mode(regs)) {
+               regs_user->abi = perf_reg_abi(current);
+               regs_user->regs = regs;
+-      } else if (current->mm) {
++      } else if (!(current->flags & PF_KTHREAD)) {
+               perf_get_regs_user(regs_user, regs, regs_user_copy);
+       } else {
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 9de9678fa7d0..46c85731d16f 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -959,6 +959,8 @@ static struct pernet_operations can_pernet_ops __read_mostly = {
+ 
+ static __init int can_init(void)
+ {
++      int err;
++
+       /* check for correct padding to be able to use the structs similarly */
+       BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
+                    offsetof(struct canfd_frame, len) ||
+@@ -972,15 +974,31 @@ static __init int can_init(void)
+       if (!rcv_cache)
+               return -ENOMEM;
+ 
+-      register_pernet_subsys(&can_pernet_ops);
++      err = register_pernet_subsys(&can_pernet_ops);
++      if (err)
++              goto out_pernet;
+ 
+       /* protocol register */
+-      sock_register(&can_family_ops);
+-      register_netdevice_notifier(&can_netdev_notifier);
++      err = sock_register(&can_family_ops);
++      if (err)
++              goto out_sock;
++      err = register_netdevice_notifier(&can_netdev_notifier);
++      if (err)
++              goto out_notifier;
++
+       dev_add_pack(&can_packet);
+       dev_add_pack(&canfd_packet);
+ 
+       return 0;
++
++out_notifier:
++      sock_unregister(PF_CAN);
++out_sock:
++      unregister_pernet_subsys(&can_pernet_ops);
++out_pernet:
++      kmem_cache_destroy(rcv_cache);
++
++      return err;
+ }
+ 
+ static __exit void can_exit(void)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2b3b0307dd89..6d9fd7d4bdfa 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2299,6 +2299,7 @@ do_frag_list:
+               kv.iov_base = skb->data + offset;
+               kv.iov_len = slen;
+               memset(&msg, 0, sizeof(msg));
++              msg.msg_flags = MSG_DONTWAIT;
+ 
+               ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
+               if (ret <= 0)
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index cb1b4772dac0..35d5a76867d0 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -265,8 +265,14 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+ 
+       prev = fq->q.fragments_tail;
+       err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+-      if (err)
++      if (err) {
++              if (err == IPFRAG_DUP) {
++                      /* No error for duplicates, pretend they got queued. */
++                      kfree_skb(skb);
++                      return -EINPROGRESS;
++              }
+               goto insert_error;
++      }
+ 
+       if (dev)
+               fq->iif = dev->ifindex;
+@@ -293,15 +299,17 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+               skb->_skb_refdst = 0UL;
+               err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+               skb->_skb_refdst = orefdst;
+-              return err;
++
++              /* After queue has assumed skb ownership, only 0 or
++               * -EINPROGRESS must be returned.
++               */
++              return err ? -EINPROGRESS : 0;
+       }
+ 
+       skb_dst_drop(skb);
+       return -EINPROGRESS;
+ 
+ insert_error:
+-      if (err == IPFRAG_DUP)
+-              goto err;
+       inet_frag_kill(&fq->q);
+ err:
+       skb_dst_drop(skb);
+@@ -481,12 +489,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+               ret = 0;
+       }
+ 
+-      /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
+-       * must be returned.
+-       */
+-      if (ret)
+-              ret = -EINPROGRESS;
+-
+       spin_unlock_bh(&fq->q.lock);
+       inet_frag_put(&fq->q);
+       return ret;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a133acb43eb1..0e209a88d88a 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1405,7 +1405,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ 
+-      if (WARN_ON(!chanctx_conf)) {
++      if (WARN_ON_ONCE(!chanctx_conf)) {
+               rcu_read_unlock();
+               return NULL;
+       }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 96e57d7c2872..c6edae051e9b 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -922,6 +922,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+ 
+       /* flush STAs and mpaths on this iface */
+       sta_info_flush(sdata);
++      ieee80211_free_keys(sdata, true);
+       mesh_path_flush_by_iface(sdata);
+ 
+       /* stop the beacon */
+@@ -1209,7 +1210,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
+       ifmsh->chsw_ttl = 0;
+ 
+       /* Remove the CSA and MCSP elements from the beacon */
+-      tmp_csa_settings = rcu_dereference(ifmsh->csa);
++      tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
++                                          lockdep_is_held(&sdata->wdev.mtx));
+       RCU_INIT_POINTER(ifmsh->csa, NULL);
+       if (tmp_csa_settings)
+               kfree_rcu(tmp_csa_settings, rcu_head);
+@@ -1231,6 +1233,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+       struct mesh_csa_settings *tmp_csa_settings;
+       int ret = 0;
+ 
++      lockdep_assert_held(&sdata->wdev.mtx);
++
+       tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
+                                  GFP_ATOMIC);
+       if (!tmp_csa_settings)
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 6d118357d9dc..9259529e0412 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2706,6 +2706,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
+       xprt = xprt_iter_xprt(&clnt->cl_xpi);
+       if (xps == NULL || xprt == NULL) {
+               rcu_read_unlock();
++              xprt_switch_put(xps);
+               return -EAGAIN;
+       }
+       resvport = xprt->resvport;
+diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
+index 2325d7ad76df..e8e8b756dc52 100644
+--- a/samples/bpf/bpf_load.c
++++ b/samples/bpf/bpf_load.c
+@@ -613,7 +613,7 @@ void read_trace_pipe(void)
+               static char buf[4096];
+               ssize_t sz;
+ 
+-              sz = read(trace_fd, buf, sizeof(buf));
++              sz = read(trace_fd, buf, sizeof(buf) - 1);
+               if (sz > 0) {
+                       buf[sz] = 0;
+                       puts(buf);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3552b4b1f902..20914a33ca5d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3114,6 +3114,7 @@ static void alc256_init(struct hda_codec *codec)
+       alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
+       alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
+       alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
++      alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ }
+ 
+ static void alc256_shutup(struct hda_codec *codec)
+@@ -7218,7 +7219,6 @@ static int patch_alc269(struct hda_codec *codec)
+               spec->shutup = alc256_shutup;
+               spec->init_hook = alc256_init;
+               spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+-              alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+               break;
+       case 0x10ec0257:
+               spec->codec_variant = ALC269_TYPE_ALC257;
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index dc06f5e40041..526d808ecbbd 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -1677,6 +1677,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
+       mutex_unlock(&its->its_lock);
+ 
+       kfree(its);
+       kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+ }
+ 
+ int vgic_its_has_attr_regs(struct kvm_device *dev,
