commit:     d40a062a74bcd8b78ded7bb3cbc35664ce34671e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 17 19:28:20 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 14 13:15:38 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d40a062a

Linux patch 4.18.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1001_linux-4.18.2.patch | 1679 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1683 insertions(+)

diff --git a/0000_README b/0000_README
index ad4a3ed..c801597 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.18.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.18.1
 
+Patch:  1001_linux-4.18.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.18.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.18.2.patch b/1001_linux-4.18.2.patch
new file mode 100644
index 0000000..1853255
--- /dev/null
+++ b/1001_linux-4.18.2.patch
@@ -0,0 +1,1679 @@
+diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
+index ddc029734b25..005d8842a503 100644
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -35,7 +35,7 @@ binutils               2.20             ld -v
+ flex                   2.5.35           flex --version
+ bison                  2.0              bison --version
+ util-linux             2.10o            fdformat --version
+-module-init-tools      0.9.10           depmod -V
++kmod                   13               depmod -V
+ e2fsprogs              1.41.4           e2fsck -V
+ jfsutils               1.1.3            fsck.jfs -V
+ reiserfsprogs          3.6.3            reiserfsck -V
+@@ -156,12 +156,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
+ reproduce the Oops with that option, then you can still decode that Oops
+ with ksymoops.
+ 
+-Module-Init-Tools
+------------------
+-
+-A new module loader is now in the kernel that requires ``module-init-tools``
+-to use.  It is backward compatible with the 2.4.x series kernels.
+-
+ Mkinitrd
+ --------
+ 
+@@ -371,16 +365,17 @@ Util-linux
+ 
+ - <https://www.kernel.org/pub/linux/utils/util-linux/>
+ 
++Kmod
++----
++
++- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
++- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
++
+ Ksymoops
+ --------
+ 
+ - <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+ 
+-Module-Init-Tools
+------------------
+-
+-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
+-
+ Mkinitrd
+ --------
+ 
+diff --git a/Makefile b/Makefile
+index 5edf963148e8..fd409a0fd4e1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 493ff75670ff..8ae5d7ae4af3 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
+       return 1;
+ }
+ 
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return pud_none(*pud);
+ }
+ 
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return pmd_none(*pmd);
+ }
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+index 16c4ccb1f154..d2364c55bbde 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+       vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+       vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+       vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+-      vmovd   _args_digest(state , idx, 4) , %xmm0
++      vmovd   _args_digest+4*32(state, idx, 4), %xmm1
+       vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+       vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+       vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
+index de27615c51ea..0c662cb6a723 100644
+--- a/arch/x86/hyperv/mmu.c
++++ b/arch/x86/hyperv/mmu.c
+@@ -95,6 +95,11 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
+       } else {
+               for_each_cpu(cpu, cpus) {
+                       vcpu = hv_cpu_number_to_vp_number(cpu);
++                      if (vcpu == VP_INVAL) {
++                              local_irq_restore(flags);
++                              goto do_native;
++                      }
++
+                       if (vcpu >= 64)
+                               goto do_native;
+ 
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index 5cdcdbd4d892..89789e8c80f6 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -3,6 +3,7 @@
+ #define _ASM_X86_I8259_H
+ 
+ #include <linux/delay.h>
++#include <asm/io.h>
+ 
+ extern unsigned int cached_irq_mask;
+ 
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index d492752f79e1..391f358ebb4c 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -394,10 +394,10 @@ extern int uv_hub_info_version(void)
+ EXPORT_SYMBOL(uv_hub_info_version);
+ 
+ /* Default UV memory block size is 2GB */
+-static unsigned long mem_block_size = (2UL << 30);
++static unsigned long mem_block_size __initdata = (2UL << 30);
+ 
+ /* Kernel parameter to specify UV mem block size */
+-static int parse_mem_block_size(char *ptr)
++static int __init parse_mem_block_size(char *ptr)
+ {
+       unsigned long size = memparse(ptr, NULL);
+ 
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index c4f0ae49a53d..664f161f96ff 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -648,10 +648,9 @@ void x86_spec_ctrl_setup_ap(void)
+ enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+-
++#endif
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+-#endif
+ 
+ static void __init l1tf_select_mitigation(void)
+ {
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 9eda6f730ec4..b41b72bd8bb8 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -905,7 +905,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+       apply_forced_caps(c);
+ }
+ 
+-static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
++void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+ {
+       u32 eax, ebx, ecx, edx;
+ 
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index e59c0ea82a33..7b229afa0a37 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
+                           *const __x86_cpu_dev_end[];
+ 
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
++extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+ extern u32 get_scattered_cpuid_leaf(unsigned int level,
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 7bb6f65c79de..29505724202a 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -1784,6 +1784,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
+                                     __pgprot(_PAGE_GLOBAL), 0);
+ }
+ 
++int set_memory_global(unsigned long addr, int numpages)
++{
++      return change_page_attr_set(&addr, numpages,
++                                  __pgprot(_PAGE_GLOBAL), 0);
++}
++
+ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+ {
+       struct cpa_data cpa;
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 47b5951e592b..e3deefb891da 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -719,28 +719,50 @@ int pmd_clear_huge(pmd_t *pmd)
+       return 0;
+ }
+ 
++#ifdef CONFIG_X86_64
+ /**
+  * pud_free_pmd_page - Clear pud entry and free pmd page.
+  * @pud: Pointer to a PUD.
++ * @addr: Virtual address associated with pud.
+  *
+- * Context: The pud range has been unmaped and TLB purged.
++ * Context: The pud range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ *
++ * NOTE: Callers must allow a single page allocation.
+  */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+-      pmd_t *pmd;
++      pmd_t *pmd, *pmd_sv;
++      pte_t *pte;
+       int i;
+ 
+       if (pud_none(*pud))
+               return 1;
+ 
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
++      pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
++      if (!pmd_sv)
++              return 0;
+ 
+-      for (i = 0; i < PTRS_PER_PMD; i++)
+-              if (!pmd_free_pte_page(&pmd[i]))
+-                      return 0;
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              pmd_sv[i] = pmd[i];
++              if (!pmd_none(pmd[i]))
++                      pmd_clear(&pmd[i]);
++      }
+ 
+       pud_clear(pud);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              if (!pmd_none(pmd_sv[i])) {
++                      pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
++                      free_page((unsigned long)pte);
++              }
++      }
++
++      free_page((unsigned long)pmd_sv);
+       free_page((unsigned long)pmd);
+ 
+       return 1;
+@@ -749,11 +771,12 @@ int pud_free_pmd_page(pud_t *pud)
+ /**
+  * pmd_free_pte_page - Clear pmd entry and free pte page.
+  * @pmd: Pointer to a PMD.
++ * @addr: Virtual address associated with pmd.
+  *
+- * Context: The pmd range has been unmaped and TLB purged.
++ * Context: The pmd range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       pte_t *pte;
+ 
+@@ -762,8 +785,30 @@ int pmd_free_pte_page(pmd_t *pmd)
+ 
+       pte = (pte_t *)pmd_page_vaddr(*pmd);
+       pmd_clear(pmd);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
+       free_page((unsigned long)pte);
+ 
+       return 1;
+ }
++
++#else /* !CONFIG_X86_64 */
++
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
++{
++      return pud_none(*pud);
++}
++
++/*
++ * Disable free page handling on x86-PAE. This assures that ioremap()
++ * does not update sync'd pmd entries. See vmalloc_sync_one().
++ */
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
++{
++      return pmd_none(*pmd);
++}
++
++#endif /* CONFIG_X86_64 */
+ #endif        /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index fb752d9a3ce9..946455e9cfef 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -435,6 +435,13 @@ static inline bool pti_kernel_image_global_ok(void)
+       return true;
+ }
+ 
++/*
++ * This is the only user for these and it is not arch-generic
++ * like the other set_memory.h functions.  Just extern them.
++ */
++extern int set_memory_nonglobal(unsigned long addr, int numpages);
++extern int set_memory_global(unsigned long addr, int numpages);
++
+ /*
+  * For some configurations, map all of kernel text into the user page
+  * tables.  This reduces TLB misses, especially on non-PCID systems.
+@@ -447,7 +454,8 @@ void pti_clone_kernel_text(void)
+        * clone the areas past rodata, they might contain secrets.
+        */
+       unsigned long start = PFN_ALIGN(_text);
+-      unsigned long end = (unsigned long)__end_rodata_hpage_align;
++      unsigned long end_clone  = (unsigned long)__end_rodata_hpage_align;
++      unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
+ 
+       if (!pti_kernel_image_global_ok())
+               return;
+@@ -459,14 +467,18 @@ void pti_clone_kernel_text(void)
+        * pti_set_kernel_image_nonglobal() did to clear the
+        * global bit.
+        */
+-      pti_clone_pmds(start, end, _PAGE_RW);
++      pti_clone_pmds(start, end_clone, _PAGE_RW);
++
++      /*
++       * pti_clone_pmds() will set the global bit in any PMDs
++       * that it clones, but we also need to get any PTEs in
++       * the last level for areas that are not huge-page-aligned.
++       */
++
++      /* Set the global bit for normal non-__init kernel text: */
++      set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
+ }
+ 
+-/*
+- * This is the only user for it and it is not arch-generic like
+- * the other set_memory.h functions.  Just extern it.
+- */
+-extern int set_memory_nonglobal(unsigned long addr, int numpages);
+ void pti_set_kernel_image_nonglobal(void)
+ {
+       /*
+@@ -478,9 +490,11 @@ void pti_set_kernel_image_nonglobal(void)
+       unsigned long start = PFN_ALIGN(_text);
+       unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+ 
+-      if (pti_kernel_image_global_ok())
+-              return;
+-
++      /*
++       * This clears _PAGE_GLOBAL from the entire kernel image.
++       * pti_clone_kernel_text() map put _PAGE_GLOBAL back for
++       * areas that are mapped to userspace.
++       */
+       set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
+ }
+ 
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 439a94bf89ad..c5e3f2acc7f0 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1259,6 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
+       get_cpu_cap(&boot_cpu_data);
+       x86_configure_nx();
+ 
++      /* Determine virtual and physical address sizes */
++      get_cpu_address_sizes(&boot_cpu_data);
++
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+ 
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index d880a4897159..4ee7c041bb82 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+       return max(start, end_page);
+ }
+ 
+-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+-                                              unsigned int bsize)
++static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
++                                      unsigned int n)
+ {
+-      unsigned int n = bsize;
+-
+       for (;;) {
+               unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+ 
+@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+               n -= len_this_page;
+               scatterwalk_start(&walk->out, sg_next(walk->out.sg));
+       }
+-
+-      return bsize;
+ }
+ 
+-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+-                                              unsigned int n)
++static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
++                                      unsigned int n)
+ {
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-
+-      return n;
+ }
+ 
+ static int ablkcipher_walk_next(struct ablkcipher_request *req,
+@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
+                        struct ablkcipher_walk *walk, int err)
+ {
+       struct crypto_tfm *tfm = req->base.tfm;
+-      unsigned int nbytes = 0;
++      unsigned int n; /* bytes processed */
++      bool more;
+ 
+-      if (likely(err >= 0)) {
+-              unsigned int n = walk->nbytes - err;
++      if (unlikely(err < 0))
++              goto finish;
+ 
+-              if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+-                      n = ablkcipher_done_fast(walk, n);
+-              else if (WARN_ON(err)) {
+-                      err = -EINVAL;
+-                      goto err;
+-              } else
+-                      n = ablkcipher_done_slow(walk, n);
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
+ 
+-              nbytes = walk->total - n;
+-              err = 0;
++      if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
++              ablkcipher_done_fast(walk, n);
++      } else {
++              if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
++                      err = -EINVAL;
++                      goto finish;
++              }
++              ablkcipher_done_slow(walk, n);
+       }
+ 
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
+-
+-err:
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+ 
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(req->base.flags);
+               return ablkcipher_walk_next(req, walk);
+       }
+-
++      err = 0;
++finish:
++      walk->nbytes = 0;
+       if (walk->iv != req->info)
+               memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+       kfree(walk->iv_buffer);
+-
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 01c0d4aa2563..77b5fa293f66 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
+       return max(start, end_page);
+ }
+ 
+-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
+-                                             unsigned int bsize)
++static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
++                                     unsigned int bsize)
+ {
+       u8 *addr;
+ 
+       addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+       addr = blkcipher_get_spot(addr, bsize);
+       scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+-      return bsize;
+ }
+ 
+-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+-                                             unsigned int n)
++static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
++                                     unsigned int n)
+ {
+       if (walk->flags & BLKCIPHER_WALK_COPY) {
+               blkcipher_map_dst(walk);
+@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+ 
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-
+-      return n;
+ }
+ 
+ int blkcipher_walk_done(struct blkcipher_desc *desc,
+                       struct blkcipher_walk *walk, int err)
+ {
+-      unsigned int nbytes = 0;
++      unsigned int n; /* bytes processed */
++      bool more;
+ 
+-      if (likely(err >= 0)) {
+-              unsigned int n = walk->nbytes - err;
++      if (unlikely(err < 0))
++              goto finish;
+ 
+-              if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
+-                      n = blkcipher_done_fast(walk, n);
+-              else if (WARN_ON(err)) {
+-                      err = -EINVAL;
+-                      goto err;
+-              } else
+-                      n = blkcipher_done_slow(walk, n);
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
+ 
+-              nbytes = walk->total - n;
+-              err = 0;
++      if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
++              blkcipher_done_fast(walk, n);
++      } else {
++              if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
++                      err = -EINVAL;
++                      goto finish;
++              }
++              blkcipher_done_slow(walk, n);
+       }
+ 
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+ 
+-err:
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
+-
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(desc->flags);
+               return blkcipher_walk_next(desc, walk);
+       }
+-
++      err = 0;
++finish:
++      walk->nbytes = 0;
+       if (walk->iv != desc->info)
+               memcpy(desc->info, walk->iv, walk->ivsize);
+       if (walk->buffer != walk->page)
+               kfree(walk->buffer);
+       if (walk->page)
+               free_page((unsigned long)walk->page);
+-
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_done);
+diff --git a/crypto/skcipher.c b/crypto/skcipher.c
+index 0fe2a2923ad0..5dc8407bdaa9 100644
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+       return max(start, end_page);
+ }
+ 
+-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
++static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+ {
+       u8 *addr;
+ 
+@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+       addr = skcipher_get_spot(addr, bsize);
+       scatterwalk_copychunks(addr, &walk->out, bsize,
+                              (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+-      return 0;
+ }
+ 
+ int skcipher_walk_done(struct skcipher_walk *walk, int err)
+ {
+-      unsigned int n = walk->nbytes - err;
+-      unsigned int nbytes;
+-
+-      nbytes = walk->total - n;
+-
+-      if (unlikely(err < 0)) {
+-              nbytes = 0;
+-              n = 0;
+-      } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+-                                         SKCIPHER_WALK_SLOW |
+-                                         SKCIPHER_WALK_COPY |
+-                                         SKCIPHER_WALK_DIFF)))) {
++      unsigned int n; /* bytes processed */
++      bool more;
++
++      if (unlikely(err < 0))
++              goto finish;
++
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
++
++      if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
++                                  SKCIPHER_WALK_SLOW |
++                                  SKCIPHER_WALK_COPY |
++                                  SKCIPHER_WALK_DIFF)))) {
+ unmap_src:
+               skcipher_unmap_src(walk);
+       } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+@@ -131,28 +132,28 @@ unmap_src:
+               skcipher_unmap_dst(walk);
+       } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+               if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
+                       err = -EINVAL;
+-                      nbytes = 0;
+-              } else
+-                      n = skcipher_done_slow(walk, n);
++                      goto finish;
++              }
++              skcipher_done_slow(walk, n);
++              goto already_advanced;
+       }
+ 
+-      if (err > 0)
+-              err = 0;
+-
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
+-
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
++already_advanced:
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+ 
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+                            CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+               return skcipher_walk_next(walk);
+       }
++      err = 0;
++finish:
++      walk->nbytes = 0;
+ 
+       /* Short-circuit for the common/fast path. */
+       if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+@@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
+       unsigned size;
+       u8 *iv;
+ 
+-      aligned_bs = ALIGN(bs, alignmask);
++      aligned_bs = ALIGN(bs, alignmask + 1);
+ 
+       /* Minimum size to align buffer by alignmask. */
+       size = alignmask & ~a;
+diff --git a/crypto/vmac.c b/crypto/vmac.c
+index df76a816cfb2..bb2fc787d615 100644
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -1,6 +1,10 @@
+ /*
+- * Modified to interface to the Linux kernel
++ * VMAC: Message Authentication Code using Universal Hashing
++ *
++ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
++ *
+  * Copyright (c) 2009, Intel Corporation.
++ * Copyright (c) 2018, Google Inc.
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms and conditions of the GNU General Public License,
+@@ -16,14 +20,15 @@
+  * Place - Suite 330, Boston, MA 02111-1307 USA.
+  */
+ 
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (t...@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
++/*
++ * Derived from:
++ *    VMAC and VHASH Implementation by Ted Krovetz (t...@acm.org) and Wei Dai.
++ *    This implementation is herby placed in the public domain.
++ *    The authors offers no warranty. Use at your own risk.
++ *    Last modified: 17 APR 08, 1700 PDT
++ */
+ 
++#include <asm/unaligned.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/crypto.h>
+@@ -31,9 +36,35 @@
+ #include <linux/scatterlist.h>
+ #include <asm/byteorder.h>
+ #include <crypto/scatterwalk.h>
+-#include <crypto/vmac.h>
+ #include <crypto/internal/hash.h>
+ 
++/*
++ * User definable settings.
++ */
++#define VMAC_TAG_LEN  64
++#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256                   */
++#define VMAC_KEY_LEN  (VMAC_KEY_SIZE/8)
++#define VMAC_NHBYTES  128/* Must 2^i for any 3 < i < 13 Standard = 128*/
++
++/* per-transform (per-key) context */
++struct vmac_tfm_ctx {
++      struct crypto_cipher *cipher;
++      u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
++      u64 polykey[2*VMAC_TAG_LEN/64];
++      u64 l3key[2*VMAC_TAG_LEN/64];
++};
++
++/* per-request context */
++struct vmac_desc_ctx {
++      union {
++              u8 partial[VMAC_NHBYTES];       /* partial block */
++              __le64 partial_words[VMAC_NHBYTES / 8];
++      };
++      unsigned int partial_size;      /* size of the partial block */
++      bool first_block_processed;
++      u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
++};
++
+ /*
+  * Constants and masks
+  */
+@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
+       } while (0)
+ #endif
+ 
+-static void vhash_abort(struct vmac_ctx *ctx)
+-{
+-      ctx->polytmp[0] = ctx->polykey[0] ;
+-      ctx->polytmp[1] = ctx->polykey[1] ;
+-      ctx->first_block_processed = 0;
+-}
+-
+ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+ {
+       u64 rh, rl, t, z = 0;
+@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+       return rl;
+ }
+ 
+-static void vhash_update(const unsigned char *m,
+-                      unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
+-                      struct vmac_ctx *ctx)
++/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
++static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
++                       struct vmac_desc_ctx *dctx,
++                       const __le64 *mptr, unsigned int blocks)
+ {
+-      u64 rh, rl, *mptr;
+-      const u64 *kptr = (u64 *)ctx->nhkey;
+-      int i;
+-      u64 ch, cl;
+-      u64 pkh = ctx->polykey[0];
+-      u64 pkl = ctx->polykey[1];
+-
+-      if (!mbytes)
+-              return;
+-
+-      BUG_ON(mbytes % VMAC_NHBYTES);
+-
+-      mptr = (u64 *)m;
+-      i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */
+-
+-      ch = ctx->polytmp[0];
+-      cl = ctx->polytmp[1];
+-
+-      if (!ctx->first_block_processed) {
+-              ctx->first_block_processed = 1;
++      const u64 *kptr = tctx->nhkey;
++      const u64 pkh = tctx->polykey[0];
++      const u64 pkl = tctx->polykey[1];
++      u64 ch = dctx->polytmp[0];
++      u64 cl = dctx->polytmp[1];
++      u64 rh, rl;
++
++      if (!dctx->first_block_processed) {
++              dctx->first_block_processed = true;
+               nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+               rh &= m62;
+               ADD128(ch, cl, rh, rl);
+               mptr += (VMAC_NHBYTES/sizeof(u64));
+-              i--;
++              blocks--;
+       }
+ 
+-      while (i--) {
++      while (blocks--) {
+               nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+               rh &= m62;
+               poly_step(ch, cl, pkh, pkl, rh, rl);
+               mptr += (VMAC_NHBYTES/sizeof(u64));
+       }
+ 
+-      ctx->polytmp[0] = ch;
+-      ctx->polytmp[1] = cl;
++      dctx->polytmp[0] = ch;
++      dctx->polytmp[1] = cl;
+ }
+ 
+-static u64 vhash(unsigned char m[], unsigned int mbytes,
+-                      u64 *tagl, struct vmac_ctx *ctx)
++static int vmac_setkey(struct crypto_shash *tfm,
++                     const u8 *key, unsigned int keylen)
+ {
+-      u64 rh, rl, *mptr;
+-      const u64 *kptr = (u64 *)ctx->nhkey;
+-      int i, remaining;
+-      u64 ch, cl;
+-      u64 pkh = ctx->polykey[0];
+-      u64 pkl = ctx->polykey[1];
+-
+-      mptr = (u64 *)m;
+-      i = mbytes / VMAC_NHBYTES;
+-      remaining = mbytes % VMAC_NHBYTES;
+-
+-      if (ctx->first_block_processed) {
+-              ch = ctx->polytmp[0];
+-              cl = ctx->polytmp[1];
+-      } else if (i) {
+-              nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+-              ch &= m62;
+-              ADD128(ch, cl, pkh, pkl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-              i--;
+-      } else if (remaining) {
+-              nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+-              ch &= m62;
+-              ADD128(ch, cl, pkh, pkl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-              goto do_l3;
+-      } else {/* Empty String */
+-              ch = pkh; cl = pkl;
+-              goto do_l3;
+-      }
+-
+-      while (i--) {
+-              nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+-              rh &= m62;
+-              poly_step(ch, cl, pkh, pkl, rh, rl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-      }
+-      if (remaining) {
+-              nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+-              rh &= m62;
+-              poly_step(ch, cl, pkh, pkl, rh, rl);
+-      }
+-
+-do_l3:
+-      vhash_abort(ctx);
+-      remaining *= 8;
+-      return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+-}
++      struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
++      __be64 out[2];
++      u8 in[16] = { 0 };
++      unsigned int i;
++      int err;
+ 
+-static u64 vmac(unsigned char m[], unsigned int mbytes,
+-                      const unsigned char n[16], u64 *tagl,
+-                      struct vmac_ctx_t *ctx)
+-{
+-      u64 *in_n, *out_p;
+-      u64 p, h;
+-      int i;
+-
+-      in_n = ctx->__vmac_ctx.cached_nonce;
+-      out_p = ctx->__vmac_ctx.cached_aes;
+-
+-      i = n[15] & 1;
+-      if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+-              in_n[0] = *(u64 *)(n);
+-              in_n[1] = *(u64 *)(n+8);
+-              ((unsigned char *)in_n)[15] &= 0xFE;
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out_p, (unsigned char *)in_n);
+-
+-              ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
++      if (keylen != VMAC_KEY_LEN) {
++              crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++              return -EINVAL;
+       }
+-      p = be64_to_cpup(out_p + i);
+-      h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+-      return le64_to_cpu(p + h);
+-}
+ 
+-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+-{
+-      u64 in[2] = {0}, out[2];
+-      unsigned i;
+-      int err = 0;
+-
+-      err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
++      err = crypto_cipher_setkey(tctx->cipher, key, keylen);
+       if (err)
+               return err;
+ 
+       /* Fill nh key */
+-      ((unsigned char *)in)[0] = 0x80;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out, (unsigned char *)in);
+-              ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+-              ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+-              ((unsigned char *)in)[15] += 1;
++      in[0] = 0x80;
++      for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
++              crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++              tctx->nhkey[i] = be64_to_cpu(out[0]);
++              tctx->nhkey[i+1] = be64_to_cpu(out[1]);
++              in[15]++;
+       }
+ 
+       /* Fill poly key */
+-      ((unsigned char *)in)[0] = 0xC0;
+-      in[1] = 0;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out, (unsigned char *)in);
+-              ctx->__vmac_ctx.polytmp[i] =
+-                      ctx->__vmac_ctx.polykey[i] =
+-                              be64_to_cpup(out) & mpoly;
+-              ctx->__vmac_ctx.polytmp[i+1] =
+-                      ctx->__vmac_ctx.polykey[i+1] =
+-                              be64_to_cpup(out+1) & mpoly;
+-              ((unsigned char *)in)[15] += 1;
++      in[0] = 0xC0;
++      in[15] = 0;
++      for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
++              crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++              tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
++              tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
++              in[15]++;
+       }
+ 
+       /* Fill ip key */
+-      ((unsigned char *)in)[0] = 0xE0;
+-      in[1] = 0;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
++      in[0] = 0xE0;
++      in[15] = 0;
++      for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
+               do {
+-                      crypto_cipher_encrypt_one(ctx->child,
+-                              (unsigned char *)out, (unsigned char *)in);
+-                      ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+-                      ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+-                      ((unsigned char *)in)[15] += 1;
+-              } while (ctx->__vmac_ctx.l3key[i] >= p64
+-                      || ctx->__vmac_ctx.l3key[i+1] >= p64);
++                      crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++                      tctx->l3key[i] = be64_to_cpu(out[0]);
++                      tctx->l3key[i+1] = be64_to_cpu(out[1]);
++                      in[15]++;
++              } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
+       }
+ 
+-      /* Invalidate nonce/aes cache and reset other elements */
+-      ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+-      ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
+-      ctx->__vmac_ctx.first_block_processed = 0;
+-
+-      return err;
++      return 0;
+ }
+ 
+-static int vmac_setkey(struct crypto_shash *parent,
+-              const u8 *key, unsigned int keylen)
++static int vmac_init(struct shash_desc *desc)
+ {
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ 
+-      if (keylen != VMAC_KEY_LEN) {
+-              crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-              return -EINVAL;
+-      }
+-
+-      return vmac_set_key((u8 *)key, ctx);
+-}
+-
+-static int vmac_init(struct shash_desc *pdesc)
+-{
++      dctx->partial_size = 0;
++      dctx->first_block_processed = false;
++      memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+       return 0;
+ }
+ 
+-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+-              unsigned int len)
++static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
+ {
+-      struct crypto_shash *parent = pdesc->tfm;
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+-      int expand;
+-      int min;
+-
+-      expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
+-                      VMAC_NHBYTES - ctx->partial_size : 0;
+-
+-      min = len < expand ? len : expand;
+-
+-      memcpy(ctx->partial + ctx->partial_size, p, min);
+-      ctx->partial_size += min;
+-
+-      if (len < expand)
+-              return 0;
+-
+-      vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
+-      ctx->partial_size = 0;
+-
+-      len -= expand;
+-      p += expand;
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++      unsigned int n;
++
++      if (dctx->partial_size) {
++              n = min(len, VMAC_NHBYTES - dctx->partial_size);
++              memcpy(&dctx->partial[dctx->partial_size], p, n);
++              dctx->partial_size += n;
++              p += n;
++              len -= n;
++              if (dctx->partial_size == VMAC_NHBYTES) {
++                      vhash_blocks(tctx, dctx, dctx->partial_words, 1);
++                      dctx->partial_size = 0;
++              }
++      }
+ 
+-      if (len % VMAC_NHBYTES) {
+-              memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
+-                      len % VMAC_NHBYTES);
+-              ctx->partial_size = len % VMAC_NHBYTES;
++      if (len >= VMAC_NHBYTES) {
++              n = round_down(len, VMAC_NHBYTES);
++              /* TODO: 'p' may be misaligned here */
++              vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
++              p += n;
++              len -= n;
+       }
+ 
+-      vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
++      if (len) {
++              memcpy(dctx->partial, p, len);
++              dctx->partial_size = len;
++      }
+ 
+       return 0;
+ }
+ 
+-static int vmac_final(struct shash_desc *pdesc, u8 *out)
++static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
++                     struct vmac_desc_ctx *dctx)
+ {
+-      struct crypto_shash *parent = pdesc->tfm;
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+-      vmac_t mac;
+-      u8 nonce[16] = {};
+-
+-      /* vmac() ends up accessing outside the array bounds that
+-       * we specify.  In appears to access up to the next 2-word
+-       * boundary.  We'll just be uber cautious and zero the
+-       * unwritten bytes in the buffer.
+-       */
+-      if (ctx->partial_size) {
+-              memset(ctx->partial + ctx->partial_size, 0,
+-                      VMAC_NHBYTES - ctx->partial_size);
++      unsigned int partial = dctx->partial_size;
++      u64 ch = dctx->polytmp[0];
++      u64 cl = dctx->polytmp[1];
++
++      /* L1 and L2-hash the final block if needed */
++      if (partial) {
++              /* Zero-pad to next 128-bit boundary */
++              unsigned int n = round_up(partial, 16);
++              u64 rh, rl;
++
++              memset(&dctx->partial[partial], 0, n - partial);
++              nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
++              rh &= m62;
++              if (dctx->first_block_processed)
++                      poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
++                                rh, rl);
++              else
++                      ADD128(ch, cl, rh, rl);
+       }
+-      mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
+-      memcpy(out, &mac, sizeof(vmac_t));
+-      memzero_explicit(&mac, sizeof(vmac_t));
+-      memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+-      ctx->partial_size = 0;
++
++      /* L3-hash the 128-bit output of L2-hash */
++      return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
++}
++
++static int vmac_final(struct shash_desc *desc, u8 *out)
++{
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++      static const u8 nonce[16] = {}; /* TODO: this is insecure */
++      union {
++              u8 bytes[16];
++              __be64 pads[2];
++      } block;
++      int index;
++      u64 hash, pad;
++
++      /* Finish calculating the VHASH of the message */
++      hash = vhash_final(tctx, dctx);
++
++      /* Generate pseudorandom pad by encrypting the nonce */
++      memcpy(&block, nonce, 16);
++      index = block.bytes[15] & 1;
++      block.bytes[15] &= ~1;
++      crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
++      pad = be64_to_cpu(block.pads[index]);
++
++      /* The VMAC is the sum of VHASH and the pseudorandom pad */
++      put_unaligned_le64(hash + pad, out);
+       return 0;
+ }
+ 
+ static int vmac_init_tfm(struct crypto_tfm *tfm)
+ {
+-      struct crypto_cipher *cipher;
+-      struct crypto_instance *inst = (void *)tfm->__crt_alg;
++      struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+       struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+-      struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
++      struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++      struct crypto_cipher *cipher;
+ 
+       cipher = crypto_spawn_cipher(spawn);
+       if (IS_ERR(cipher))
+               return PTR_ERR(cipher);
+ 
+-      ctx->child = cipher;
++      tctx->cipher = cipher;
+       return 0;
+ }
+ 
+ static void vmac_exit_tfm(struct crypto_tfm *tfm)
+ {
+-      struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+-      crypto_free_cipher(ctx->child);
++      struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++
++      crypto_free_cipher(tctx->cipher);
+ }
+ 
+ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+       if (IS_ERR(alg))
+               return PTR_ERR(alg);
+ 
++      err = -EINVAL;
++      if (alg->cra_blocksize != 16)
++              goto out_put_alg;
++
+       inst = shash_alloc_instance("vmac", alg);
+       err = PTR_ERR(inst);
+       if (IS_ERR(inst))
+@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+       inst->alg.base.cra_blocksize = alg->cra_blocksize;
+       inst->alg.base.cra_alignmask = alg->cra_alignmask;
+ 
+-      inst->alg.digestsize = sizeof(vmac_t);
+-      inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
++      inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
+       inst->alg.base.cra_init = vmac_init_tfm;
+       inst->alg.base.cra_exit = vmac_exit_tfm;
+ 
++      inst->alg.descsize = sizeof(struct vmac_desc_ctx);
++      inst->alg.digestsize = VMAC_TAG_LEN / 8;
+       inst->alg.init = vmac_init;
+       inst->alg.update = vmac_update;
+       inst->alg.final = vmac_final;
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index ff478d826d7d..051b8c6bae64 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -84,8 +84,6 @@ done:
+ 
+ static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+ {
+-      psp->sev_int_rcvd = 0;
+-
+       wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+       *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ }
+@@ -148,6 +146,8 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+       iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
+       iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+ 
++      psp->sev_int_rcvd = 0;
++
+       reg = cmd;
+       reg <<= PSP_CMDRESP_CMD_SHIFT;
+       reg |= PSP_CMDRESP_IOC;
+@@ -856,6 +856,9 @@ void psp_dev_destroy(struct sp_device *sp)
+ {
+       struct psp_device *psp = sp->psp_data;
+ 
++      if (!psp)
++              return;
++
+       if (psp->sev_misc)
+               kref_put(&misc_dev->refcount, sev_exit);
+ 
+diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
+index d2810c183b73..958ced3ca485 100644
+--- a/drivers/crypto/ccree/cc_cipher.c
++++ b/drivers/crypto/ccree/cc_cipher.c
+@@ -593,34 +593,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+       }
+ }
+ 
++/*
++ * Update a CTR-AES 128 bit counter
++ */
++static void cc_update_ctr(u8 *ctr, unsigned int increment)
++{
++      if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
++          IS_ALIGNED((unsigned long)ctr, 8)) {
++
++              __be64 *high_be = (__be64 *)ctr;
++              __be64 *low_be = high_be + 1;
++              u64 orig_low = __be64_to_cpu(*low_be);
++              u64 new_low = orig_low + (u64)increment;
++
++              *low_be = __cpu_to_be64(new_low);
++
++              if (new_low < orig_low)
++                      *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
++      } else {
++              u8 *pos = (ctr + AES_BLOCK_SIZE);
++              u8 val;
++              unsigned int size;
++
++              for (; increment; increment--)
++                      for (size = AES_BLOCK_SIZE; size; size--) {
++                              val = *--pos + 1;
++                              *pos = val;
++                              if (val)
++                                      break;
++                      }
++      }
++}
++
+ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+ {
+       struct skcipher_request *req = (struct skcipher_request *)cc_req;
+       struct scatterlist *dst = req->dst;
+       struct scatterlist *src = req->src;
+       struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+-      struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+-      unsigned int ivsize = crypto_skcipher_ivsize(tfm);
++      struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
++      struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
++      unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
++      unsigned int len;
+ 
+-      cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+-      kzfree(req_ctx->iv);
++      switch (ctx_p->cipher_mode) {
++      case DRV_CIPHER_CBC:
++              /*
++               * The crypto API expects us to set the req->iv to the last
++               * ciphertext block. For encrypt, simply copy from the result.
++               * For decrypt, we must copy from a saved buffer since this
++               * could be an in-place decryption operation and the src is
++               * lost by this point.
++               */
++              if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
++                      memcpy(req->iv, req_ctx->backup_info, ivsize);
++                      kzfree(req_ctx->backup_info);
++              } else if (!err) {
++                      len = req->cryptlen - ivsize;
++                      scatterwalk_map_and_copy(req->iv, req->dst, len,
++                                               ivsize, 0);
++              }
++              break;
+ 
+-      /*
+-       * The crypto API expects us to set the req->iv to the last
+-       * ciphertext block. For encrypt, simply copy from the result.
+-       * For decrypt, we must copy from a saved buffer since this
+-       * could be an in-place decryption operation and the src is
+-       * lost by this point.
+-       */
+-      if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
+-              memcpy(req->iv, req_ctx->backup_info, ivsize);
+-              kzfree(req_ctx->backup_info);
+-      } else if (!err) {
+-              scatterwalk_map_and_copy(req->iv, req->dst,
+-                                       (req->cryptlen - ivsize),
+-                                       ivsize, 0);
++      case DRV_CIPHER_CTR:
++              /* Compute the counter of the last block */
++              len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
++              cc_update_ctr((u8 *)req->iv, len);
++              break;
++
++      default:
++              break;
+       }
+ 
++      cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
++      kzfree(req_ctx->iv);
++
+       skcipher_request_complete(req, err);
+ }
+ 
+@@ -752,20 +800,29 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
+ static int cc_cipher_decrypt(struct skcipher_request *req)
+ {
+       struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
++      struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+       struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+       unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+       gfp_t flags = cc_gfp_flags(&req->base);
++      unsigned int len;
+ 
+-      /*
+-       * Allocate and save the last IV sized bytes of the source, which will
+-       * be lost in case of in-place decryption and might be needed for CTS.
+-       */
+-      req_ctx->backup_info = kmalloc(ivsize, flags);
+-      if (!req_ctx->backup_info)
+-              return -ENOMEM;
++      if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
++
++              /* Allocate and save the last IV sized bytes of the source,
++               * which will be lost in case of in-place decryption.
++               */
++              req_ctx->backup_info = kzalloc(ivsize, flags);
++              if (!req_ctx->backup_info)
++                      return -ENOMEM;
++
++              len = req->cryptlen - ivsize;
++              scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
++                                       ivsize, 0);
++      } else {
++              req_ctx->backup_info = NULL;
++      }
+ 
+-      scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+-                               (req->cryptlen - ivsize), ivsize, 0);
+       req_ctx->is_giv = false;
+ 
+       return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
+index 96ff777474d7..e4ebde05a8a0 100644
+--- a/drivers/crypto/ccree/cc_hash.c
++++ b/drivers/crypto/ccree/cc_hash.c
+@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
+       return rc;
+ }
+ 
+-static int cc_hash_finup(struct ahash_request *req)
+-{
+-      struct ahash_req_ctx *state = ahash_request_ctx(req);
+-      struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+-      struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+-      u32 digestsize = crypto_ahash_digestsize(tfm);
+-      struct scatterlist *src = req->src;
+-      unsigned int nbytes = req->nbytes;
+-      u8 *result = req->result;
+-      struct device *dev = drvdata_to_dev(ctx->drvdata);
+-      bool is_hmac = ctx->is_hmac;
+-      struct cc_crypto_req cc_req = {};
+-      struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+-      unsigned int idx = 0;
+-      int rc;
+-      gfp_t flags = cc_gfp_flags(&req->base);
+-
+-      dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+-              nbytes);
+-
+-      if (cc_map_req(dev, state, ctx)) {
+-              dev_err(dev, "map_ahash_source() failed\n");
+-              return -EINVAL;
+-      }
+-
+-      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+-                                    flags)) {
+-              dev_err(dev, "map_ahash_request_final() failed\n");
+-              cc_unmap_req(dev, state, ctx);
+-              return -ENOMEM;
+-      }
+-      if (cc_map_result(dev, state, digestsize)) {
+-              dev_err(dev, "map_ahash_digest() failed\n");
+-              cc_unmap_hash_request(dev, state, src, true);
+-              cc_unmap_req(dev, state, ctx);
+-              return -ENOMEM;
+-      }
+-
+-      /* Setup request structure */
+-      cc_req.user_cb = cc_hash_complete;
+-      cc_req.user_arg = req;
+-
+-      idx = cc_restore_hash(desc, ctx, state, idx);
+-
+-      if (is_hmac)
+-              idx = cc_fin_hmac(desc, req, idx);
+-
+-      idx = cc_fin_result(desc, req, idx);
+-
+-      rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+-      if (rc != -EINPROGRESS && rc != -EBUSY) {
+-              dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+-              cc_unmap_hash_request(dev, state, src, true);
+-              cc_unmap_result(dev, state, digestsize, result);
+-              cc_unmap_req(dev, state, ctx);
+-      }
+-      return rc;
+-}
+-
+-static int cc_hash_final(struct ahash_request *req)
++static int cc_do_finup(struct ahash_request *req, bool update)
+ {
+       struct ahash_req_ctx *state = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
+       int rc;
+       gfp_t flags = cc_gfp_flags(&req->base);
+ 
+-      dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+-              nbytes);
++      dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
++              update ? "finup" : "final", nbytes);
+ 
+       if (cc_map_req(dev, state, ctx)) {
+               dev_err(dev, "map_ahash_source() failed\n");
+               return -EINVAL;
+       }
+ 
+-      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
++      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
+                                     flags)) {
+               dev_err(dev, "map_ahash_request_final() failed\n");
+               cc_unmap_req(dev, state, ctx);
+               return -ENOMEM;
+       }
+-
+       if (cc_map_result(dev, state, digestsize)) {
+               dev_err(dev, "map_ahash_digest() failed\n");
+               cc_unmap_hash_request(dev, state, src, true);
+@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
+ 
+       idx = cc_restore_hash(desc, ctx, state, idx);
+ 
+-      /* "DO-PAD" must be enabled only when writing current length to HW */
++      /* Pad the hash */
+       hw_desc_init(&desc[idx]);
+       set_cipher_do(&desc[idx], DO_PAD);
+       set_cipher_mode(&desc[idx], ctx->hw_mode);
+@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
+       return rc;
+ }
+ 
++static int cc_hash_finup(struct ahash_request *req)
++{
++      return cc_do_finup(req, true);
++}
++
++
++static int cc_hash_final(struct ahash_request *req)
++{
++      return cc_do_finup(req, false);
++}
++
+ static int cc_hash_init(struct ahash_request *req)
+ {
+       struct ahash_req_ctx *state = ahash_request_ctx(req);
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index 26ca0276b503..a75cb371cd19 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
+-int pud_free_pmd_page(pud_t *pud);
+-int pmd_free_pte_page(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
+ {
+       return 0;
+ }
+-static inline int pud_free_pmd_page(pud_t *pud)
++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return 0;
+ }
+-static inline int pmd_free_pte_page(pmd_t *pmd)
++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return 0;
+ }
+diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
+deleted file mode 100644
+index 6b700c7b2fe1..000000000000
+--- a/include/crypto/vmac.h
++++ /dev/null
+@@ -1,63 +0,0 @@
+-/*
+- * Modified to interface to the Linux kernel
+- * Copyright (c) 2009, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- */
+-
+-#ifndef __CRYPTO_VMAC_H
+-#define __CRYPTO_VMAC_H
+-
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (t...@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
+-
+-/*
+- * User definable settings.
+- */
+-#define VMAC_TAG_LEN  64
+-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256                   */
+-#define VMAC_KEY_LEN  (VMAC_KEY_SIZE/8)
+-#define VMAC_NHBYTES  128/* Must 2^i for any 3 < i < 13 Standard = 128*/
+-
+-/*
+- * This implementation uses u32 and u64 as names for unsigned 32-
+- * and 64-bit integer types. These are defined in C99 stdint.h. The
+- * following may need adaptation if you are not running a C99 or
+- * Microsoft C environment.
+- */
+-struct vmac_ctx {
+-      u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+-      u64 polykey[2*VMAC_TAG_LEN/64];
+-      u64 l3key[2*VMAC_TAG_LEN/64];
+-      u64 polytmp[2*VMAC_TAG_LEN/64];
+-      u64 cached_nonce[2];
+-      u64 cached_aes[2];
+-      int first_block_processed;
+-};
+-
+-typedef u64 vmac_t;
+-
+-struct vmac_ctx_t {
+-      struct crypto_cipher *child;
+-      struct vmac_ctx __vmac_ctx;
+-      u8 partial[VMAC_NHBYTES];       /* partial block */
+-      int partial_size;               /* size of the partial block */
+-};
+-
+-#endif /* __CRYPTO_VMAC_H */
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index 54e5bbaa3200..517f5853ffed 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+               if (ioremap_pmd_enabled() &&
+                   ((next - addr) == PMD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+-                  pmd_free_pte_page(pmd)) {
++                  pmd_free_pte_page(pmd, addr)) {
+                       if (pmd_set_huge(pmd, phys_addr + addr, prot))
+                               continue;
+               }
+@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
+               if (ioremap_pud_enabled() &&
+                   ((next - addr) == PUD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+-                  pud_free_pmd_page(pud)) {
++                  pud_free_pmd_page(pud, addr)) {
+                       if (pud_set_huge(pud, phys_addr + addr, prot))
+                               continue;
+               }
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 1036e4fa1ea2..3bba8f4b08a9 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
+               del_timer(&session->timer);
+ }
+ 
+-static void hidp_process_report(struct hidp_session *session,
+-                              int type, const u8 *data, int len, int intr)
++static void hidp_process_report(struct hidp_session *session, int type,
++                              const u8 *data, unsigned int len, int intr)
+ {
+       if (len > HID_MAX_BUFFER_SIZE)
+               len = HID_MAX_BUFFER_SIZE;
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index 1a6f85e0e6e1..999d585eaa73 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -10,10 +10,16 @@ fi
+ DEPMOD=$1
+ KERNELRELEASE=$2
+ 
+-if ! test -r System.map -a -x "$DEPMOD"; then
++if ! test -r System.map ; then
+       exit 0
+ fi
+ 
++if [ -z $(command -v $DEPMOD) ]; then
++      echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++      echo "This is probably in the kmod package." >&2
++      exit 1
++fi
++
+ # older versions of depmod require the version string to start with three
+ # numbers, so we cheat with a symlink here
+ depmod_hack_needed=true
