Git-Url: http://git.frugalware.org/gitweb/gitweb.cgi?p=frugalware-current.git;a=commitdiff;h=f43cb876155ccc00a38bbf7516f928fbdf7d5a72

commit f43cb876155ccc00a38bbf7516f928fbdf7d5a72
Author: crazy <cr...@frugalware.org>
Date:   Wed Jan 17 14:54:29 2018 +0100

kernel-4.14.13-1-x86_64

* prepare for 4.14.14
* added missing retpoline bits for Skylake
* removed the e1000e fix; 4.14.14 finally has it
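
For context, _F_kernel_patches is the patch list the kernel FrugalBuild applies on top of the vanilla tree. A minimal sketch of what the change below amounts to (the loop, paths and variable names here are illustrative assumptions, not the actual Frugalware kernel.sh helper):

    # Illustrative sketch only; directory layout and variables are assumed.
    _F_kernel_patches=(
            mute-pps_state_mismatch.patch
            SME-BSP_SME-microcode-update-fixes.patch
            # the two retpoline patches added by this commit
            retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
            retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
    )
    cd "$startdir/src/linux-4.14" || exit 1   # hypothetical unpacked kernel tree
    for ppatch in "${_F_kernel_patches[@]}"; do
            patch -p1 -i "../$ppatch" || exit 1
    done

fix-e1000e-nm.patch simply drops out of that list.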

diff --git a/source/base/kernel/FrugalBuild b/source/base/kernel/FrugalBuild
index 528797d..8a9bac8 100644
--- a/source/base/kernel/FrugalBuild
+++ b/source/base/kernel/FrugalBuild
@@ -45,10 +45,12 @@ _F_kernel_patches=(
introduce-NUMA-identity-node-sched-domain.patch
# mute PPS error shit .. buggy in 4.14.x
mute-pps_state_mismatch.patch
-                  fix-e1000e-nm.patch
# see https://marc.info/?l=linux-kernel&m=151561236821659&w=2
# ZEN microcode update fix from a initrd with mem_encrpyt=on
SME-BSP_SME-microcode-update-fixes.patch
+                  # missing retpoline bits in .14
+                  retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
+                  
retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
)

for ppatch in "${_F_kernel_patches[@]}"
diff --git a/source/base/kernel/fix-e1000e-nm.patch b/source/base/kernel/fix-e1000e-nm.patch
deleted file mode 100644
index dd7f8ed..0000000
--- a/source/base/kernel/fix-e1000e-nm.patch
+++ /dev/null
@@ -1,46 +0,0 @@
----
- drivers/net/ethernet/intel/e1000e/ich8lan.c | 11 ++++++++---
- 1 file changed, 8 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
-index d6d4ed7acf03..31277d3bb7dc 100644
---- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
-+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
-@@ -1367,6 +1367,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
-  *  Checks to see of the link status of the hardware has changed.  If a
-  *  change in link status has been detected, then we read the PHY registers
-  *  to get the current speed/duplex if link exists.
-+ *
-+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
-+ *  up).
-  **/
- static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
- {
-@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
-        * Change or Rx Sequence Error interrupt.
-        */
-       if (!mac->get_link_status)
--              return 0;
-+              return 1;
-
-       /* First we want to see if the MII Status Register reports
-        * link.  If so, then we want to get the current speed/duplex
-@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
-        * different link partner.
-        */
-       ret_val = e1000e_config_fc_after_link_up(hw);
--      if (ret_val)
-+      if (ret_val) {
-               e_dbg("Error configuring flow control\n");
-+              return ret_val;
-+      }
-
--      return ret_val;
-+      return 1;
- }
-
- static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
---
-2.15.1
-
-
\ No newline at end of file
diff --git a/source/base/kernel/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch b/source/base/kernel/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
new file mode 100644
index 0000000..8f402eb
--- /dev/null
+++ b/source/base/kernel/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
@@ -0,0 +1,175 @@
+From c995efd5a740d9cbafbf58bde4973e8b50b4d761 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <d...@amazon.co.uk>
+Date: Fri, 12 Jan 2018 17:49:25 +0000
+Subject: x86/retpoline: Fill RSB on context switch for affected CPUs
+
+On context switch from a shallow call stack to a deeper one, as the CPU
+does 'ret' up the deeper side it may encounter RSB entries (predictions for
+where the 'ret' goes to) which were populated in userspace.
+
+This is problematic if neither SMEP nor KPTI (the latter of which marks
+userspace pages as NX for the kernel) are active, as malicious code in
+userspace may then be executed speculatively.
+
+Overwrite the CPU's return prediction stack with calls which are predicted
+to return to an infinite loop, to "capture" speculation if this
+happens. This is required both for retpoline, and also in conjunction with
+IBRS for !SMEP && !KPTI.
+
+On Skylake+ the problem is slightly different, and an *underflow* of the
+RSB may cause errant branch predictions to occur. So there it's not so much
+overwrite, as *filling* the RSB to attempt to prevent it getting
+empty. This is only a partial solution for Skylake+ since there are many
+other conditions which may result in the RSB becoming empty. The full
+solution on Skylake+ is to use IBRS, which will prevent the problem even
+when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
+required on context switch.
+
+[ tglx: Added missing vendor check and slighty massaged comments and
+       changelog ]
+
+Signed-off-by: David Woodhouse <d...@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <t...@linutronix.de>
+Acked-by: Arjan van de Ven <ar...@linux.intel.com>
+Cc: gno...@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <r...@redhat.com>
+Cc: Andi Kleen <a...@linux.intel.com>
+Cc: Josh Poimboeuf <jpoim...@redhat.com>
+Cc: thomas.lenda...@amd.com
+Cc: Peter Zijlstra <pet...@infradead.org>
+Cc: Linus Torvalds <torva...@linux-foundation.org>
+Cc: Jiri Kosina <ji...@kernel.org>
+Cc: Andy Lutomirski <l...@amacapital.net>
+Cc: Dave Hansen <dave.han...@intel.com>
+Cc: Kees Cook <keesc...@google.com>
+Cc: Tim Chen <tim.c.c...@linux.intel.com>
+Cc: Greg Kroah-Hartman <gre...@linux-foundation.org>
+Cc: Paul Turner <p...@google.com>
+Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-d...@amazon.co.uk
+---
+ arch/x86/entry/entry_32.S          | 11 +++++++++++
+ arch/x86/entry/entry_64.S          | 11 +++++++++++
+ arch/x86/include/asm/cpufeatures.h |  1 +
+ arch/x86/kernel/cpu/bugs.c         | 36 ++++++++++++++++++++++++++++++++++++
+ 4 files changed, 59 insertions(+)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index a1f28a5..60c4c34 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
+       movl    %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+ #endif
+
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++      FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+       /* restore callee-saved registers */
+       popl    %esi
+       popl    %edi
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 59874bc..d54a0ed 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -487,6 +487,17 @@ ENTRY(__switch_to_asm)
+       movq    %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ #endif
+
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++      FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+       /* restore callee-saved registers */
+       popq    %r15
+       popq    %r14
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index f275447..aa09559 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -211,6 +211,7 @@
+ #define X86_FEATURE_AVX512_4FMAPS     ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+
+ #define X86_FEATURE_MBA                       ( 7*32+18) /* Memory Bandwidth Allocation */
++#define X86_FEATURE_RSB_CTXSW         ( 7*32+19) /* Fill RSB on context switches */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW                ( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index e4dc261..390b3dc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -23,6 +23,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
++#include <asm/intel-family.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+
+@@ -155,6 +156,23 @@ disable:
+       return SPECTRE_V2_CMD_NONE;
+ }
+
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++          boot_cpu_data.x86 == 6) {
++              switch (boot_cpu_data.x86_model) {
++              case INTEL_FAM6_SKYLAKE_MOBILE:
++              case INTEL_FAM6_SKYLAKE_DESKTOP:
++              case INTEL_FAM6_SKYLAKE_X:
++              case INTEL_FAM6_KABYLAKE_MOBILE:
++              case INTEL_FAM6_KABYLAKE_DESKTOP:
++                      return true;
++              }
++      }
++      return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -213,6 +231,24 @@ retpoline_auto:
+
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
++
++      /*
++       * If neither SMEP or KPTI are available, there is a risk of
++       * hitting userspace addresses in the RSB after a context switch
++       * from a shallow call stack to a deeper one. To prevent this fill
++       * the entire RSB, even when using IBRS.
++       *
++       * Skylake era CPUs have a separate issue with *underflow* of the
++       * RSB, when they will predict 'ret' targets from the generic BTB.
++       * The proper mitigation for this is IBRS. If IBRS is not supported
++       * or deactivated in favour of retpolines the RSB fill on context
++       * switch is required.
++       */
++      if ((!boot_cpu_has(X86_FEATURE_PTI) &&
++           !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++              setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++              pr_info("Filling RSB on context switch\n");
++      }
+ }
+
+ #undef pr_fmt
+--
+cgit v1.1
+
diff --git a/source/base/kernel/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch b/source/base/kernel/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
new file mode 100644
index 0000000..d930100
--- /dev/null
+++ b/source/base/kernel/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
@@ -0,0 +1,90 @@
+From 28d437d550e1e39f805d99f9f8ac399c778827b7 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lenda...@amd.com>
+Date: Sat, 13 Jan 2018 17:27:30 -0600
+Subject: x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
+
+The PAUSE instruction is currently used in the retpoline and RSB filling
+macros as a speculation trap.  The use of PAUSE was originally suggested
+because it showed a very, very small difference in the amount of
+cycles/time used to execute the retpoline as compared to LFENCE.  On AMD,
+the PAUSE instruction is not a serializing instruction, so the pause/jmp
+loop will use excess power as it is speculated over waiting for return
+to mispredict to the correct target.
+
+The RSB filling macro is applicable to AMD, and, if software is unable to
+verify that LFENCE is serializing on AMD (possible when running under a
+hypervisor), the generic retpoline support will be used and, so, is also
+applicable to AMD.  Keep the current usage of PAUSE for Intel, but add an
+LFENCE instruction to the speculation trap for AMD.
+
+The same sequence has been adopted by GCC for the GCC generated retpolines.
+
+Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
+Signed-off-by: Thomas Gleixner <t...@linutronix.de>
+Reviewed-by: Borislav Petkov <b...@alien8.de>
+Acked-by: David Woodhouse <d...@amazon.co.uk>
+Acked-by: Arjan van de Ven <ar...@linux.intel.com>
+Cc: Rik van Riel <r...@redhat.com>
+Cc: Andi Kleen <a...@linux.intel.com>
+Cc: Paul Turner <p...@google.com>
+Cc: Peter Zijlstra <pet...@infradead.org>
+Cc: Tim Chen <tim.c.c...@linux.intel.com>
+Cc: Jiri Kosina <ji...@kernel.org>
+Cc: Dave Hansen <dave.han...@intel.com>
+Cc: Andy Lutomirski <l...@kernel.org>
+Cc: Josh Poimboeuf <jpoim...@redhat.com>
+Cc: Dan Williams <dan.j.willi...@intel.com>
+Cc: Linus Torvalds <torva...@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gre...@linux-foundation.org>
+Cc: Kees Cook <keesc...@google.com>
+Link: https://lkml.kernel.org/r/20180113232730.31060.36287.st...@tlendack-t1.amdoffice.net
+---
+ arch/x86/include/asm/nospec-branch.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 402a11c..7b45d84 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -11,7 +11,7 @@
+  * Fill the CPU return stack buffer.
+  *
+  * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; jmp' loop to capture speculative execution.
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+  *
+  * This is required in various cases for retpoline and IBRS-based
+  * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+@@ -38,11 +38,13 @@
+       call    772f;                           \
+ 773:  /* speculation trap */                  \
+       pause;                                  \
++      lfence;                                 \
+       jmp     773b;                           \
+ 772:                                          \
+       call    774f;                           \
+ 775:  /* speculation trap */                  \
+       pause;                                  \
++      lfence;                                 \
+       jmp     775b;                           \
+ 774:                                          \
+       dec     reg;                            \
+@@ -73,6 +75,7 @@
+       call    .Ldo_rop_\@
+ .Lspec_trap_\@:
+       pause
++      lfence
+       jmp     .Lspec_trap_\@
+ .Ldo_rop_\@:
+       mov     \reg, (%_ASM_SP)
+@@ -165,6 +168,7 @@
+       "       .align 16\n"                                    \
+       "901:   call   903f;\n"                                 \
+       "902:   pause;\n"                                       \
++      "       lfence;\n"                                      \
+       "       jmp    902b;\n"                                 \
+       "       .align 16\n"                                    \
+       "903:   addl   $4, %%esp;\n"                            \
+--
+cgit v1.1
+
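
For anyone checking the rebuilt kernel after boot, a quick hedged sketch (assuming the sysfs vulnerabilities interface is present in this 4.14.x build; the dmesg string comes straight from the RSB patch above):

    # Reported Spectre v2 mitigation (retpoline / IBRS status)
    cat /sys/devices/system/cpu/vulnerabilities/spectre_v2

    # The RSB-fill patch prints this when X86_FEATURE_RSB_CTXSW is forced on
    dmesg | grep "Filling RSB on context switch"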