commit:     1e4d06678c3d3c6bef6c60ef5d05ec227fc8d26d
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jan 10 11:43:09 2018 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jan 10 11:43:09 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e4d0667

Linux patch 4.14.13

 0000_README              |    4 +
 1012_linux-4.14.13.patch | 1518 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1522 insertions(+)

diff --git a/0000_README b/0000_README
index af8a1d1..1abd05e 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-4.14.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.12
 
+Patch:  1012_linux-4.14.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.14.13.patch b/1012_linux-4.14.13.patch
new file mode 100644
index 0000000..faee46a
--- /dev/null
+++ b/1012_linux-4.14.13.patch
@@ -0,0 +1,1518 @@
+diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
+index ad41b3813f0a..ea91cb61a602 100644
+--- a/Documentation/x86/x86_64/mm.txt
++++ b/Documentation/x86/x86_64/mm.txt
+@@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
+ ... unused hole ...
+ ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
+ ... unused hole ...
+-fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
+-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
++                                  vaddr_end for KASLR
++fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
++fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
+ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+ ... unused hole ...
+ ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+@@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
+ ... unused hole ...
+ ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
+ ... unused hole ...
+-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
++                                  vaddr_end for KASLR
++fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
++... unused hole ...
+ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+ ... unused hole ...
+ ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+ ... unused hole ...
+ ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
+-ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space
++ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
+ [fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
+ ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
+@@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
+ The mappings are not part of any other kernel PGD and are only available
+ during EFI runtime calls.
+ 
+-The module mapping space size changes based on the CONFIG requirements for the
+-following fixmap section.
+-
+ Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
+ physical memory, vmalloc/ioremap space and virtual memory map are randomized.
+ Their order is preserved but their base will be offset early at boot time.
++
++Be very careful vs. KASLR when changing anything here. The KASLR address
++range must not overlap with anything except the KASAN shadow area, which is
++correct as KASAN disables KASLR.
+diff --git a/Makefile b/Makefile
+index 20f7d4de0f1c..a67c5179052a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index f35974ee7264..c9173c02081c 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+               return 0;
+ 
+       __asm__ __volatile__(
++      "       mov     lp_count, %5            \n"
+       "       lp      3f                      \n"
+       "1:     ldb.ab  %3, [%2, 1]             \n"
+       "       breq.d  %3, 0, 3f               \n"
+@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+       "       .word   1b, 4b                  \n"
+       "       .previous                       \n"
+       : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
+-      : "g"(-EFAULT), "l"(count)
+-      : "memory");
++      : "g"(-EFAULT), "r"(count)
++      : "lp_count", "lp_start", "lp_end", "memory");
+ 
+       return res;
+ }
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index dd5a08aaa4da..3eb4bfc1fb36 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -12,6 +12,7 @@
+    for the semaphore.  */
+ 
+ #define __PA_LDCW_ALIGNMENT   16
++#define __PA_LDCW_ALIGN_ORDER 4
+ #define __ldcw_align(a) ({                                    \
+       unsigned long __ret = (unsigned long) &(a)->lock[0];    \
+       __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
+@@ -29,6 +30,7 @@
+    ldcd). */
+ 
+ #define __PA_LDCW_ALIGNMENT   4
++#define __PA_LDCW_ALIGN_ORDER 2
+ #define __ldcw_align(a) (&(a)->slock)
+ #define __LDCW        "ldcw,co"
+ 
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index f3cecf5117cf..e95207c0565e 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -35,6 +35,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/signal.h>
+ #include <asm/unistd.h>
++#include <asm/ldcw.h>
+ #include <asm/thread_info.h>
+ 
+ #include <linux/linkage.h>
+@@ -46,6 +47,14 @@
+ #endif
+ 
+       .import         pa_tlb_lock,data
++      .macro  load_pa_tlb_lock reg
++#if __PA_LDCW_ALIGNMENT > 4
++      load32  PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
++      depi    0,31,__PA_LDCW_ALIGN_ORDER, \reg
++#else
++      load32  PA(pa_tlb_lock), \reg
++#endif
++      .endm
+ 
+       /* space_to_prot macro creates a prot id from a space id */
+ 
+@@ -457,7 +466,7 @@
+       .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+       cmpib,COND(=),n 0,\spc,2f
+-      load32          PA(pa_tlb_lock),\tmp
++      load_pa_tlb_lock \tmp
+ 1:    LDCW            0(\tmp),\tmp1
+       cmpib,COND(=)   0,\tmp1,1b
+       nop
+@@ -480,7 +489,7 @@
+       /* Release pa_tlb_lock lock. */
+       .macro          tlb_unlock1     spc,tmp
+ #ifdef CONFIG_SMP
+-      load32          PA(pa_tlb_lock),\tmp
++      load_pa_tlb_lock \tmp
+       tlb_unlock0     \spc,\tmp
+ #endif
+       .endm
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index adf7187f8951..2d40c4ff3f69 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -36,6 +36,7 @@
+ #include <asm/assembly.h>
+ #include <asm/pgtable.h>
+ #include <asm/cache.h>
++#include <asm/ldcw.h>
+ #include <linux/linkage.h>
+ 
+       .text
+@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
+ 
+       .macro  tlb_lock        la,flags,tmp
+ #ifdef CONFIG_SMP
+-      ldil            L%pa_tlb_lock,%r1
+-      ldo             R%pa_tlb_lock(%r1),\la
++#if __PA_LDCW_ALIGNMENT > 4
++      load32          pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
++      depi            0,31,__PA_LDCW_ALIGN_ORDER, \la
++#else
++      load32          pa_tlb_lock, \la
++#endif
+       rsm             PSW_SM_I,\flags
+ 1:    LDCW            0(\la),\tmp
+       cmpib,<>,n      0,\tmp,3f
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 30f92391a93e..cad3e8661cd6 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -39,6 +39,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/fs.h>
++#include <linux/cpu.h>
+ #include <linux/module.h>
+ #include <linux/personality.h>
+ #include <linux/ptrace.h>
+@@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+       return 1;
+ }
+ 
++/*
++ * Idle thread support
++ *
++ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
++ * QEMU idle the host too.
++ */
++
++int running_on_qemu __read_mostly;
++
++void __cpuidle arch_cpu_idle_dead(void)
++{
++      /* nop on real hardware, qemu will offline CPU. */
++      asm volatile("or %%r31,%%r31,%%r31\n":::);
++}
++
++void __cpuidle arch_cpu_idle(void)
++{
++      local_irq_enable();
++
++      /* nop on real hardware, qemu will idle sleep. */
++      asm volatile("or %%r10,%%r10,%%r10\n":::);
++}
++
++static int __init parisc_idle_init(void)
++{
++      const char *marker;
++
++      /* check QEMU/SeaBIOS marker in PAGE0 */
++      marker = (char *) &PAGE0->pad0;
++      running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
++
++      if (!running_on_qemu)
++              cpu_idle_poll_ctrl(1);
++
++      return 0;
++}
++arch_initcall(parisc_idle_init);
++
+ /*
+  * Copy architecture-specific thread state
+  */
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 4797d08581ce..6e1e39035380 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
+       return __bad_area(regs, address, SEGV_MAPERR);
+ }
+ 
++static noinline int bad_access(struct pt_regs *regs, unsigned long address)
++{
++      return __bad_area(regs, address, SEGV_ACCERR);
++}
++
+ static int do_sigbus(struct pt_regs *regs, unsigned long address,
+                    unsigned int fault)
+ {
+@@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+ 
+ good_area:
+       if (unlikely(access_error(is_write, is_exec, vma)))
+-              return bad_area(regs, address);
++              return bad_access(regs, address);
+ 
+       /*
+        * If for any reason at all we couldn't handle the fault,
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 40d0a1a97889..b87a930c2201 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -794,11 +794,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
+ 
+       if (kvm->arch.use_cmma) {
+               /*
+-               * Get the last slot. They should be sorted by base_gfn, so the
+-               * last slot is also the one at the end of the address space.
+-               * We have verified above that at least one slot is present.
++               * Get the first slot. They are reverse sorted by base_gfn, so
++               * the first slot is also the one at the end of the address
++               * space. We have verified above that at least one slot is
++               * present.
+                */
+-              ms = slots->memslots + slots->used_slots - 1;
++              ms = slots->memslots;
+               /* round up so we only use full longs */
+               ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
+               /* allocate enough bytes to store all the bits */
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 5b25287f449b..7bd3a59232f0 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -1009,7 +1009,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
+               cbrlo[entries] = gfn << PAGE_SHIFT;
+       }
+ 
+-      if (orc) {
++      if (orc && gfn < ms->bitmap_size) {
+               /* increment only if we are really flipping the bit to 1 */
+               if (!test_and_set_bit(gfn, ms->pgste_bitmap))
+                       atomic64_inc(&ms->dirty_pages);
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 8f0aace08b87..8156e47da7ba 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -5,6 +5,7 @@
+ 
+ #include <asm/cpu_entry_area.h>
+ #include <asm/perf_event.h>
++#include <asm/tlbflush.h>
+ #include <asm/insn.h>
+ 
+ #include "../perf_event.h"
+@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
+ 
+ static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
+ {
++      unsigned long start = (unsigned long)cea;
+       phys_addr_t pa;
+       size_t msz = 0;
+ 
+       pa = virt_to_phys(addr);
++
++      preempt_disable();
+       for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
+               cea_set_pte(cea, pa, prot);
++
++      /*
++       * This is a cross-CPU update of the cpu_entry_area, we must shoot down
++       * all TLB entries for it.
++       */
++      flush_tlb_kernel_range(start, start + size);
++      preempt_enable();
+ }
+ 
+ static void ds_clear_cea(void *cea, size_t size)
+ {
++      unsigned long start = (unsigned long)cea;
+       size_t msz = 0;
+ 
++      preempt_disable();
+       for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
+               cea_set_pte(cea, 0, PAGE_NONE);
++
++      flush_tlb_kernel_range(start, start + size);
++      preempt_enable();
+ }
+ 
+ static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index dbfd0854651f..cf5961ca8677 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       ".popsection\n"                                                 \
+       ".pushsection .altinstr_replacement, \"ax\"\n"                  \
+       ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
+-      ".popsection"
++      ".popsection\n"
+ 
+ #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+       OLDINSTR_2(oldinstr, 1, 2)                                      \
+@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       ".pushsection .altinstr_replacement, \"ax\"\n"                  \
+       ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
+       ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
+-      ".popsection"
++      ".popsection\n"
+ 
+ /*
+  * Alternative instructions for different CPU types or capabilities.
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 07cdd1715705..21ac898df2d8 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -341,6 +341,6 @@
+ #define X86_BUG_SWAPGS_FENCE          X86_BUG(11) /* SWAPGS without input dep on GS */
+ #define X86_BUG_MONITOR                       X86_BUG(12) /* IPI required to wake up remote CPU */
+ #define X86_BUG_AMD_E400              X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+-#define X86_BUG_CPU_INSECURE          X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
++#define X86_BUG_CPU_MELTDOWN          X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ 
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index b97a539bcdee..6b8f73dcbc2c 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
+ #define PGDIR_SIZE    (_AC(1, UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK    (~(PGDIR_SIZE - 1))
+ 
+-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
++/*
++ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
++ *
++ * Be very careful vs. KASLR when changing anything here. The KASLR address
++ * range must not overlap with anything except the KASAN shadow area, which
++ * is correct as KASAN disables KASLR.
++ */
+ #define MAXMEM                        _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+ 
+ #ifdef CONFIG_X86_5LEVEL
+@@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
+ # define VMALLOC_SIZE_TB      _AC(32, UL)
+ # define __VMALLOC_BASE               _AC(0xffffc90000000000, UL)
+ # define __VMEMMAP_BASE               _AC(0xffffea0000000000, UL)
+-# define LDT_PGD_ENTRY                _AC(-4, UL)
++# define LDT_PGD_ENTRY                _AC(-3, UL)
+ # define LDT_BASE_ADDR                (LDT_PGD_ENTRY << PGDIR_SHIFT)
+ #endif
+ 
+@@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;
+ 
+ #define MODULES_VADDR         (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+ /* The module sections ends with the start of the fixmap */
+-#define MODULES_END           __fix_to_virt(__end_of_fixed_addresses + 1)
++#define MODULES_END           _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN           (MODULES_END - MODULES_VADDR)
+ 
+ #define ESPFIX_PGD_ENTRY      _AC(-2, UL)
+ #define ESPFIX_BASE_ADDR      (ESPFIX_PGD_ENTRY << P4D_SHIFT)
+ 
+-#define CPU_ENTRY_AREA_PGD    _AC(-3, UL)
++#define CPU_ENTRY_AREA_PGD    _AC(-4, UL)
+ #define CPU_ENTRY_AREA_BASE   (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+ 
+ #define EFI_VA_START          ( -4 * (_AC(1, UL) << 30))
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 90cb82dbba57..570e8bb1f386 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -22,7 +22,7 @@ obj-y                        += common.o
+ obj-y                 += rdrand.o
+ obj-y                 += match.o
+ obj-y                 += bugs.o
+-obj-$(CONFIG_CPU_FREQ)        += aperfmperf.o
++obj-y                 += aperfmperf.o
+ obj-y                 += cpuid-deps.o
+ 
+ obj-$(CONFIG_PROC_FS) += proc.o
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 0ee83321a313..7eba34df54c3 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -14,6 +14,8 @@
+ #include <linux/percpu.h>
+ #include <linux/smp.h>
+ 
++#include "cpu.h"
++
+ struct aperfmperf_sample {
+       unsigned int    khz;
+       ktime_t time;
+@@ -24,7 +26,7 @@ struct aperfmperf_sample {
+ static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+ 
+ #define APERFMPERF_CACHE_THRESHOLD_MS 10
+-#define APERFMPERF_REFRESH_DELAY_MS   20
++#define APERFMPERF_REFRESH_DELAY_MS   10
+ #define APERFMPERF_STALE_THRESHOLD_MS 1000
+ 
+ /*
+@@ -38,14 +40,8 @@ static void aperfmperf_snapshot_khz(void *dummy)
+       u64 aperf, aperf_delta;
+       u64 mperf, mperf_delta;
+       struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+-      ktime_t now = ktime_get();
+-      s64 time_delta = ktime_ms_delta(now, s->time);
+       unsigned long flags;
+ 
+-      /* Don't bother re-computing within the cache threshold time. */
+-      if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+-              return;
+-
+       local_irq_save(flags);
+       rdmsrl(MSR_IA32_APERF, aperf);
+       rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -61,31 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy)
+       if (mperf_delta == 0)
+               return;
+ 
+-      s->time = now;
++      s->time = ktime_get();
+       s->aperf = aperf;
+       s->mperf = mperf;
++      s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
++}
+ 
+-      /* If the previous iteration was too long ago, discard it. */
+-      if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
+-              s->khz = 0;
+-      else
+-              s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
++static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
++{
++      s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
++
++      /* Don't bother re-computing within the cache threshold time. */
++      if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++              return true;
++
++      smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
++
++      /* Return false if the previous iteration was too long ago. */
++      return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
+ }
+ 
+-unsigned int arch_freq_get_on_cpu(int cpu)
++unsigned int aperfmperf_get_khz(int cpu)
+ {
+-      unsigned int khz;
++      if (!cpu_khz)
++              return 0;
++
++      if (!static_cpu_has(X86_FEATURE_APERFMPERF))
++              return 0;
+ 
++      aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
++      return per_cpu(samples.khz, cpu);
++}
++
++void arch_freq_prepare_all(void)
++{
++      ktime_t now = ktime_get();
++      bool wait = false;
++      int cpu;
++
++      if (!cpu_khz)
++              return;
++
++      if (!static_cpu_has(X86_FEATURE_APERFMPERF))
++              return;
++
++      for_each_online_cpu(cpu)
++              if (!aperfmperf_snapshot_cpu(cpu, now, false))
++                      wait = true;
++
++      if (wait)
++              msleep(APERFMPERF_REFRESH_DELAY_MS);
++}
++
++unsigned int arch_freq_get_on_cpu(int cpu)
++{
+       if (!cpu_khz)
+               return 0;
+ 
+       if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+               return 0;
+ 
+-      smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+-      khz = per_cpu(samples.khz, cpu);
+-      if (khz)
+-              return khz;
++      if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
++              return per_cpu(samples.khz, cpu);
+ 
+       msleep(APERFMPERF_REFRESH_DELAY_MS);
+       smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b1be494ab4e8..2d3bd2215e5b 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -900,7 +900,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+       setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+ 
+       if (c->x86_vendor != X86_VENDOR_AMD)
+-              setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
++              setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ 
+       fpu__init_system(c);
+ 
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index f52a370b6c00..e806b11a99af 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
+ 
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++
++unsigned int aperfmperf_get_khz(int cpu);
++
+ #endif /* ARCH_X86_CPU_H */
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index c6daec4bdba5..330b8462d426 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
+ #define F16H_MPB_MAX_SIZE 3458
++#define F17H_MPB_MAX_SIZE 3200
+ 
+       switch (family) {
+       case 0x14:
+@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+       case 0x16:
+               max_size = F16H_MPB_MAX_SIZE;
+               break;
++      case 0x17:
++              max_size = F17H_MPB_MAX_SIZE;
++              break;
+       default:
+               max_size = F1XH_MPB_MAX_SIZE;
+               break;
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 6b7e17bf0b71..e7ecedafa1c8 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -5,6 +5,8 @@
+ #include <linux/seq_file.h>
+ #include <linux/cpufreq.h>
+ 
++#include "cpu.h"
++
+ /*
+  *    Get CPU information for use by the procfs.
+  */
+@@ -78,8 +80,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+               seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+ 
+       if (cpu_has(c, X86_FEATURE_TSC)) {
+-              unsigned int freq = cpufreq_quick_get(cpu);
++              unsigned int freq = aperfmperf_get_khz(cpu);
+ 
++              if (!freq)
++                      freq = cpufreq_quick_get(cpu);
+               if (!freq)
+                       freq = cpu_khz;
+               seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index f56902c1f04b..2a4849e92831 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -61,10 +61,10 @@ enum address_markers_idx {
+       KASAN_SHADOW_START_NR,
+       KASAN_SHADOW_END_NR,
+ #endif
++      CPU_ENTRY_AREA_NR,
+ #if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
+       LDT_NR,
+ #endif
+-      CPU_ENTRY_AREA_NR,
+ #ifdef CONFIG_X86_ESPFIX64
+       ESPFIX_START_NR,
+ #endif
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 80259ad8c386..6b462a472a7b 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -870,7 +870,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+       .next_asid = 1,
+       .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
+ };
+-EXPORT_SYMBOL_GPL(cpu_tlbstate);
++EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
+ 
+ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+ {
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 879ef930e2c2..aedebd2ebf1e 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -34,25 +34,14 @@
+ #define TB_SHIFT 40
+ 
+ /*
+- * Virtual address start and end range for randomization. The end changes base
+- * on configuration to have the highest amount of space for randomization.
+- * It increases the possible random position for each randomized region.
++ * Virtual address start and end range for randomization.
+  *
+- * You need to add an if/def entry if you introduce a new memory region
+- * compatible with KASLR. Your entry must be in logical order with memory
+- * layout. For example, ESPFIX is before EFI because its virtual address is
+- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
+- * ensure that this order is correct and won't be changed.
++ * The end address could depend on more configuration options to make the
++ * highest amount of space for randomization available, but that's too hard
++ * to keep straight and caused issues already.
+  */
+ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+-
+-#if defined(CONFIG_X86_ESPFIX64)
+-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+-#elif defined(CONFIG_EFI)
+-static const unsigned long vaddr_end = EFI_VA_END;
+-#else
+-static const unsigned long vaddr_end = __START_KERNEL_map;
+-#endif
++static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
+ 
+ /* Default values */
+ unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
+       unsigned long remain_entropy;
+ 
+       /*
+-       * All these BUILD_BUG_ON checks ensures the memory layout is
+-       * consistent with the vaddr_start/vaddr_end variables.
++       * These BUILD_BUG_ON checks ensure the memory layout is consistent
++       * with the vaddr_start/vaddr_end variables. These checks are very
++       * limited....
+        */
+       BUILD_BUG_ON(vaddr_start >= vaddr_end);
+-      BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
+-                   vaddr_end >= EFI_VA_END);
+-      BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+-                    IS_ENABLED(CONFIG_EFI)) &&
+-                   vaddr_end >= __START_KERNEL_map);
++      BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
+       BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+ 
+       if (!kaslr_memory_enabled())
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 2da28ba97508..43d4a4a29037 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -56,13 +56,13 @@
+ 
+ static void __init pti_print_if_insecure(const char *reason)
+ {
+-      if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++      if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               pr_info("%s\n", reason);
+ }
+ 
+ static void __init pti_print_if_secure(const char *reason)
+ {
+-      if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++      if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               pr_info("%s\n", reason);
+ }
+ 
+@@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
+       }
+ 
+ autosel:
+-      if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++      if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+               return;
+ enable:
+       setup_force_cpu_cap(X86_FEATURE_PTI);
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index 8a99a2e96537..5b513ccffde4 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
+       /*
+        * Update the first page pointer to skip over the CSH header.
+        */
+-      cap_info->pages[0] += csh->headersize;
++      cap_info->phys[0] += csh->headersize;
++
++      /*
++       * cap_info->capsule should point at a virtual mapping of the entire
++       * capsule, starting at the capsule header. Our image has the Quark
++       * security header prepended, so we cannot rely on the default vmap()
++       * mapping created by the generic capsule code.
++       * Given that the Quark firmware does not appear to care about the
++       * virtual mapping, let's just point cap_info->capsule at our copy
++       * of the capsule header.
++       */
++      cap_info->capsule = &cap_info->header;
+ 
+       return 1;
+ }
+diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
+index db1bc3147bc4..600afa99941f 100644
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+                                                   algt->mask));
+       if (IS_ERR(poly))
+               return PTR_ERR(poly);
++      poly_hash = __crypto_hash_alg_common(poly);
++
++      err = -EINVAL;
++      if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
++              goto out_put_poly;
+ 
+       err = -ENOMEM;
+       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+ 
+       ctx = aead_instance_ctx(inst);
+       ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+-      poly_hash = __crypto_hash_alg_common(poly);
+       err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+                                     aead_crypto_instance(inst));
+       if (err)
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index ee9cfb99fe25..f8ec3d4ba4a8 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
+       crypto_free_aead(ctx->child);
+ }
+ 
++static void pcrypt_free(struct aead_instance *inst)
++{
++      struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
++
++      crypto_drop_aead(&ctx->spawn);
++      kfree(inst);
++}
++
+ static int pcrypt_init_instance(struct crypto_instance *inst,
+                               struct crypto_alg *alg)
+ {
+@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
+       inst->alg.encrypt = pcrypt_aead_encrypt;
+       inst->alg.decrypt = pcrypt_aead_decrypt;
+ 
++      inst->free = pcrypt_free;
++
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto out_drop_aead;
+@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
+       return -EINVAL;
+ }
+ 
+-static void pcrypt_free(struct crypto_instance *inst)
+-{
+-      struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+-
+-      crypto_drop_aead(&ctx->spawn);
+-      kfree(inst);
+-}
+-
+ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+                                       unsigned long val, void *data)
+ {
+@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+ static struct crypto_template pcrypt_tmpl = {
+       .name = "pcrypt",
+       .create = pcrypt_create,
+-      .free = pcrypt_free,
+       .module = THIS_MODULE,
+ };
+ 
+diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
+index 328ca93781cf..1b76d9585902 100644
+--- a/drivers/bus/sunxi-rsb.c
++++ b/drivers/bus/sunxi-rsb.c
+@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
+       .match          = sunxi_rsb_device_match,
+       .probe          = sunxi_rsb_device_probe,
+       .remove         = sunxi_rsb_device_remove,
++      .uevent         = of_device_uevent_modalias,
+ };
+ 
+ static void sunxi_rsb_dev_release(struct device *dev)
+diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
+index 3e104f5aa0c2..b56b3f711d94 100644
+--- a/drivers/crypto/chelsio/Kconfig
++++ b/drivers/crypto/chelsio/Kconfig
+@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select CRYPTO_AUTHENC
++      select CRYPTO_GF128MUL
+       ---help---
+         The Chelsio Crypto Co-processor driver for T6 adapters.
+ 
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index a9fd8b9e86cd..699ee5a9a8f9 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
+                                         CWQ_ENTRY_SIZE, 0, NULL);
+       if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+               kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
++              queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
+ {
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
++      queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
++      queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
+ }
+ 
+ static long spu_queue_register_workfn(void *arg)
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index ec8ac5c4dd84..055e2e8f985a 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -20,10 +20,6 @@
+ 
+ #define NO_FURTHER_WRITE_ACTION -1
+ 
+-#ifndef phys_to_page
+-#define phys_to_page(x)               pfn_to_page((x) >> PAGE_SHIFT)
+-#endif
+-
+ /**
+  * efi_free_all_buff_pages - free all previous allocated buffer pages
+  * @cap_info: pointer to current instance of capsule_info structure
+@@ -35,7 +31,7 @@
+ static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+ {
+       while (cap_info->index > 0)
+-              __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
++              __free_page(cap_info->pages[--cap_info->index]);
+ 
+       cap_info->index = NO_FURTHER_WRITE_ACTION;
+ }
+@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
+ 
+       cap_info->pages = temp_page;
+ 
++      temp_page = krealloc(cap_info->phys,
++                           pages_needed * sizeof(phys_addr_t *),
++                           GFP_KERNEL | __GFP_ZERO);
++      if (!temp_page)
++              return -ENOMEM;
++
++      cap_info->phys = temp_page;
++
+       return 0;
+ }
+ 
+@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+  **/
+ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+ {
++      bool do_vunmap = false;
+       int ret;
+ 
+-      ret = efi_capsule_update(&cap_info->header, cap_info->pages);
++      /*
++       * cap_info->capsule may have been assigned already by a quirk
++       * handler, so only overwrite it if it is NULL
++       */
++      if (!cap_info->capsule) {
++              cap_info->capsule = vmap(cap_info->pages, cap_info->index,
++                                       VM_MAP, PAGE_KERNEL);
++              if (!cap_info->capsule)
++                      return -ENOMEM;
++              do_vunmap = true;
++      }
++
++      ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
++      if (do_vunmap)
++              vunmap(cap_info->capsule);
+       if (ret) {
+               pr_err("capsule update failed\n");
+               return ret;
+@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+                       goto failed;
+               }
+ 
+-              cap_info->pages[cap_info->index++] = page_to_phys(page);
++              cap_info->pages[cap_info->index] = page;
++              cap_info->phys[cap_info->index] = page_to_phys(page);
+               cap_info->page_bytes_remain = PAGE_SIZE;
++              cap_info->index++;
+       } else {
+-              page = phys_to_page(cap_info->pages[cap_info->index - 1]);
++              page = cap_info->pages[cap_info->index - 1];
+       }
+ 
+       kbuff = kmap(page);
+@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
+       struct capsule_info *cap_info = file->private_data;
+ 
+       kfree(cap_info->pages);
++      kfree(cap_info->phys);
+       kfree(file->private_data);
+       file->private_data = NULL;
+       return 0;
+@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
+               return -ENOMEM;
+       }
+ 
++      cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
++      if (!cap_info->phys) {
++              kfree(cap_info->pages);
++              kfree(cap_info);
++              return -ENOMEM;
++      }
++
+       file->private_data = cap_info;
+ 
+       return 0;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index c9bcc6c45012..ce2ed16f2a30 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6944,6 +6944,7 @@ enum {
+ #define  RESET_PCH_HANDSHAKE_ENABLE   (1<<4)
+ 
+ #define GEN8_CHICKEN_DCPR_1           _MMIO(0x46430)
++#define   SKL_SELECT_ALTERNATE_DC_EXIT        (1<<30)
+ #define   MASK_WAKEMEM                        (1<<13)
+ 
+ #define SKL_DFSM                      _MMIO(0x51000)
+@@ -8475,6 +8476,7 @@ enum skl_power_gate {
+ #define  BXT_CDCLK_CD2X_DIV_SEL_2     (2<<22)
+ #define  BXT_CDCLK_CD2X_DIV_SEL_4     (3<<22)
+ #define  BXT_CDCLK_CD2X_PIPE(pipe)    ((pipe)<<20)
++#define  CDCLK_DIVMUX_CD_OVERRIDE     (1<<19)
+ #define  BXT_CDCLK_CD2X_PIPE_NONE     BXT_CDCLK_CD2X_PIPE(3)
+ #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE       (1<<16)
+ #define  CDCLK_FREQ_DECIMAL_MASK      (0x7ff)
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index 1241e5891b29..26a8dcd2c549 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+ 
+ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+ {
+-      int min_cdclk = skl_calc_cdclk(0, vco);
+       u32 val;
+ 
+       WARN_ON(vco != 8100000 && vco != 8640000);
+ 
+-      /* select the minimum CDCLK before enabling DPLL 0 */
+-      val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
+-      I915_WRITE(CDCLK_CTL, val);
+-      POSTING_READ(CDCLK_CTL);
+-
+       /*
+        * We always enable DPLL0 with the lowest link rate possible, but still
+        * taking into account the VCO required to operate the eDP panel at the
+@@ -922,7 +916,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ {
+       int cdclk = cdclk_state->cdclk;
+       int vco = cdclk_state->vco;
+-      u32 freq_select, pcu_ack;
++      u32 freq_select, pcu_ack, cdclk_ctl;
+       int ret;
+ 
+       WARN_ON((cdclk == 24000) != (vco == 0));
+@@ -939,7 +933,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+               return;
+       }
+ 
+-      /* set CDCLK_CTL */
++      /* Choose frequency for this cdclk */
+       switch (cdclk) {
+       case 450000:
+       case 432000:
+@@ -967,10 +961,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+           dev_priv->cdclk.hw.vco != vco)
+               skl_dpll0_disable(dev_priv);
+ 
++      cdclk_ctl = I915_READ(CDCLK_CTL);
++
++      if (dev_priv->cdclk.hw.vco != vco) {
++              /* Wa Display #1183: skl,kbl,cfl */
++              cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
++              cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
++              I915_WRITE(CDCLK_CTL, cdclk_ctl);
++      }
++
++      /* Wa Display #1183: skl,kbl,cfl */
++      cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
++      I915_WRITE(CDCLK_CTL, cdclk_ctl);
++      POSTING_READ(CDCLK_CTL);
++
+       if (dev_priv->cdclk.hw.vco != vco)
+               skl_dpll0_enable(dev_priv, vco);
+ 
+-      I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
++      /* Wa Display #1183: skl,kbl,cfl */
++      cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
++      I915_WRITE(CDCLK_CTL, cdclk_ctl);
++
++      cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
++      I915_WRITE(CDCLK_CTL, cdclk_ctl);
++
++      /* Wa Display #1183: skl,kbl,cfl */
++      cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
++      I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       POSTING_READ(CDCLK_CTL);
+ 
+       /* inform PCU of the change */
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 49577eba8e7e..51cb5293bf43 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+ 
+       DRM_DEBUG_KMS("Enabling DC5\n");
+ 
++      /* Wa Display #1183: skl,kbl,cfl */
++      if (IS_GEN9_BC(dev_priv))
++              I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
++                         SKL_SELECT_ALTERNATE_DC_EXIT);
++
+       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+ }
+ 
+@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
+ {
+       DRM_DEBUG_KMS("Disabling DC6\n");
+ 
++      /* Wa Display #1183: skl,kbl,cfl */
++      if (IS_GEN9_BC(dev_priv))
++              I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
++                         SKL_SELECT_ALTERNATE_DC_EXIT);
++
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ }
+ 
+@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
+       GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
++      BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+ 
+ #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (               \
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index b84cd978fce2..a4aaa748e987 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+               case 5:
+                       etd->hw_version = 3;
+                       break;
+-              case 6 ... 14:
++              case 6 ... 15:
+                       etd->hw_version = 4;
+                       break;
+               default:
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index e67ba6c40faf..8f7a3c00b6cf 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1611,13 +1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+       domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+       domain->geometry.aperture_end = (1UL << ias) - 1;
+       domain->geometry.force_aperture = true;
+-      smmu_domain->pgtbl_ops = pgtbl_ops;
+ 
+       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+-      if (ret < 0)
++      if (ret < 0) {
+               free_io_pgtable_ops(pgtbl_ops);
++              return ret;
++      }
+ 
+-      return ret;
++      smmu_domain->pgtbl_ops = pgtbl_ops;
++      return 0;
+ }
+ 
+ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+@@ -1644,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+ 
+ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ {
+-      int i;
++      int i, j;
+       struct arm_smmu_master_data *master = fwspec->iommu_priv;
+       struct arm_smmu_device *smmu = master->smmu;
+ 
+@@ -1652,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+               u32 sid = fwspec->ids[i];
+               __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ 
++              /* Bridged PCI devices may end up with duplicated IDs */
++              for (j = 0; j < i; j++)
++                      if (fwspec->ids[j] == sid)
++                              break;
++              if (j < i)
++                      continue;
++
+               arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+       }
+ }
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 85cff68643e0..125b744c9c28 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+ 
+       switch (command) {
+       case NAND_CMD_READ0:
++      case NAND_CMD_READOOB:
+       case NAND_CMD_PAGEPROG:
+               info->use_ecc = 1;
+               break;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 19e4ad2f3f2e..0c4b690cf761 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
+ 
+       spin_lock(&root->inode_lock);
+       node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
++
+       if (node) {
+               if (btrfs_inode->delayed_node) {
+                       refcount_inc(&node->refs);      /* can be accessed */
+@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
+                       spin_unlock(&root->inode_lock);
+                       return node;
+               }
+-              btrfs_inode->delayed_node = node;
+-              /* can be accessed and cached in the inode */
+-              refcount_add(2, &node->refs);
++
++              /*
++               * It's possible that we're racing into the middle of removing
++               * this node from the radix tree.  In this case, the refcount
++               * was zero and it should never go back to one.  Just return
++               * NULL like it was never in the radix at all; our release
++               * function is in the process of removing it.
++               *
++               * Some implementations of refcount_inc refuse to bump the
++               * refcount once it has hit zero.  If we don't do this dance
++               * here, refcount_inc() may decide to just WARN_ONCE() instead
++               * of actually bumping the refcount.
++               *
++               * If this node is properly in the radix, we want to bump the
++               * refcount twice, once for the inode and once for this get
++               * operation.
++               */
++              if (refcount_inc_not_zero(&node->refs)) {
++                      refcount_inc(&node->refs);
++                      btrfs_inode->delayed_node = node;
++              } else {
++                      node = NULL;
++              }
++
+               spin_unlock(&root->inode_lock);
+               return node;
+       }
+@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
+       mutex_unlock(&delayed_node->mutex);
+ 
+       if (refcount_dec_and_test(&delayed_node->refs)) {
+-              bool free = false;
+               struct btrfs_root *root = delayed_node->root;
++
+               spin_lock(&root->inode_lock);
+-              if (refcount_read(&delayed_node->refs) == 0) {
+-                      radix_tree_delete(&root->delayed_nodes_tree,
+-                                        delayed_node->inode_id);
+-                      free = true;
+-              }
++              /*
++               * Once our refcount goes to zero, nobody is allowed to bump it
++               * back up.  We can delete it now.
++               */
++              ASSERT(refcount_read(&delayed_node->refs) == 0);
++              radix_tree_delete(&root->delayed_nodes_tree,
++                                delayed_node->inode_id);
+               spin_unlock(&root->inode_lock);
+-              if (free)
+-                      kmem_cache_free(delayed_node_cache, delayed_node);
++              kmem_cache_free(delayed_node_cache, delayed_node);
+       }
+ }
+ 
+diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
+index e0f867cd8553..96f1087e372c 100644
+--- a/fs/proc/cpuinfo.c
++++ b/fs/proc/cpuinfo.c
+@@ -1,12 +1,18 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/cpufreq.h>
+ #include <linux/fs.h>
+ #include <linux/init.h>
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ 
++__weak void arch_freq_prepare_all(void)
++{
++}
++
+ extern const struct seq_operations cpuinfo_op;
+ static int cpuinfo_open(struct inode *inode, struct file *file)
+ {
++      arch_freq_prepare_all();
+       return seq_open(file, &cpuinfo_op);
+ }
+ 
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 1c713fd5b3e6..5aa392eae1c3 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
+ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+                                             struct userfaultfd_wait_queue *ewq)
+ {
++      struct userfaultfd_ctx *release_new_ctx;
++
+       if (WARN_ON_ONCE(current->flags & PF_EXITING))
+               goto out;
+ 
+       ewq->ctx = ctx;
+       init_waitqueue_entry(&ewq->wq, current);
++      release_new_ctx = NULL;
+ 
+       spin_lock(&ctx->event_wqh.lock);
+       /*
+@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+                               new = (struct userfaultfd_ctx *)
+                                       (unsigned long)
+                                       ewq->msg.arg.reserved.reserved1;
+-
+-                              userfaultfd_ctx_put(new);
++                              release_new_ctx = new;
+                       }
+                       break;
+               }
+@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+       __set_current_state(TASK_RUNNING);
+       spin_unlock(&ctx->event_wqh.lock);
+ 
++      if (release_new_ctx) {
++              struct vm_area_struct *vma;
++              struct mm_struct *mm = release_new_ctx->mm;
++
++              /* the various vma->vm_userfaultfd_ctx still points to it */
++              down_write(&mm->mmap_sem);
++              for (vma = mm->mmap; vma; vma = vma->vm_next)
++                      if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
++                              vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
++              up_write(&mm->mmap_sem);
++
++              userfaultfd_ctx_put(release_new_ctx);
++      }
++
+       /*
+        * ctx may go away after this if the userfault pseudo fd is
+        * already released.
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 537ff842ff73..cbf85c4c745f 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -917,6 +917,7 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+ }
+ #endif
+ 
++extern void arch_freq_prepare_all(void);
+ extern unsigned int arch_freq_get_on_cpu(int cpu);
+ 
+ /* the following are really really optional */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index d813f7b04da7..29fdf8029cf6 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -140,11 +140,13 @@ struct efi_boot_memmap {
+ 
+ struct capsule_info {
+       efi_capsule_header_t    header;
++      efi_capsule_header_t    *capsule;
+       int                     reset_type;
+       long                    index;
+       size_t                  count;
+       size_t                  total_size;
+-      phys_addr_t             *pages;
++      struct page             **pages;
++      phys_addr_t             *phys;
+       size_t                  page_bytes_remain;
+ };
+ 
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index f4ff47d4a893..fe0c349684fa 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ {
+       if (fscache_cookie_valid(cookie) && PageFsCache(page))
+               return __fscache_maybe_release_page(cookie, page, gfp);
+-      return false;
++      return true;
+ }
+ 
+ /**
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 6670fbd3e466..354578d253d5 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
+ {
+       struct kstatfs sbuf;
+ 
+-      if (time_is_before_jiffies(acct->needcheck))
++      if (time_is_after_jiffies(acct->needcheck))
+               goto out;
+ 
+       /* May block */
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 8dcd8825b2de..1facff1dbbae 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -78,7 +78,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+       handler = sig_handler(t, sig);
+ 
+       if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+-                      handler == SIG_DFL && !force)
++          handler == SIG_DFL && !(force && sig_kernel_only(sig)))
+               return 1;
+ 
+       return sig_handler_ignored(handler, sig);
+@@ -94,13 +94,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
+       if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
+               return 0;
+ 
+-      if (!sig_task_ignored(t, sig, force))
+-              return 0;
+-
+       /*
+-       * Tracers may want to know about even ignored signals.
++       * Tracers may want to know about even ignored signal unless it
++       * is SIGKILL which can't be reported anyway but can be ignored
++       * by SIGNAL_UNKILLABLE task.
+        */
+-      return !t->ptrace;
++      if (t->ptrace && sig != SIGKILL)
++              return 0;
++
++      return sig_task_ignored(t, sig, force);
+ }
+ 
+ /*
+@@ -929,9 +931,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
+        * then start taking the whole group down immediately.
+        */
+       if (sig_fatal(p, sig) &&
+-          !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
++          !(signal->flags & SIGNAL_GROUP_EXIT) &&
+           !sigismember(&t->real_blocked, sig) &&
+-          (sig == SIGKILL || !t->ptrace)) {
++          (sig == SIGKILL || !p->ptrace)) {
+               /*
+                * This signal will be fatal to the whole group.
+                */
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index ec39f730a0bf..58b629bb70de 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+               next = pmd_addr_end(addr, end);
+               if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
+                               && pmd_none_or_clear_bad(pmd))
+-                      continue;
++                      goto next;
+ 
+               /* invoke the mmu notifier if the pmd is populated */
+               if (!mni_start) {
+@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+                                       }
+ 
+                                       /* huge pmd was handled */
+-                                      continue;
++                                      goto next;
+                               }
+                       }
+                       /* fall through, the trans huge pmd just split */
+@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+               this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+                                dirty_accountable, prot_numa);
+               pages += this_pages;
++next:
++              cond_resched();
+       } while (pmd++, addr = next, addr != end);
+ 
+       if (mni_start)
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 60805abf98af..30e56a100ee8 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
+       if (unlikely(!mem_section)) {
+               unsigned long size, align;
+ 
+-              size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
++              size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
+               align = 1 << (INTERNODE_CACHE_SHIFT);
+               mem_section = memblock_virt_alloc(size, align);
+       }
+diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
+index 82a64b58041d..e395137ecff1 100644
+--- a/security/apparmor/mount.c
++++ b/security/apparmor/mount.c
+@@ -330,6 +330,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
+       AA_BUG(!mntpath);
+       AA_BUG(!buffer);
+ 
++      if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++              return 0;
++
+       error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
+                            &mntpnt, &info, profile->disconnected);
+       if (error)
+@@ -381,6 +384,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
+       AA_BUG(!profile);
+       AA_BUG(devpath && !devbuffer);
+ 
++      if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++              return 0;
++
+       if (devpath) {
+               error = aa_path_name(devpath, path_flags(profile, devpath),
+                                    devbuffer, &devname, &info,
+@@ -559,6 +565,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
+       AA_BUG(!profile);
+       AA_BUG(!path);
+ 
++      if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++              return 0;
++
+       error = aa_path_name(path, path_flags(profile, path), buffer, &name,
+                            &info, profile->disconnected);
+       if (error)
+@@ -614,7 +623,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
+       AA_BUG(!new_path);
+       AA_BUG(!old_path);
+ 
+-      if (profile_unconfined(profile))
++      if (profile_unconfined(profile) ||
++          !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+               return aa_get_newest_label(&profile->label);
+ 
+       error = aa_path_name(old_path, path_flags(profile, old_path),
