[PATCH 4.14 088/195] [Variant 2/Spectre-v2] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling

2018-02-15 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Marc Zyngier 


Commit f72af90c3783 upstream.

We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified a HVC call
coming from the guest.

Tested-by: Ard Biesheuvel 
Reviewed-by: Christoffer Dall 
Signed-off-by: Marc Zyngier 
Signed-off-by: Catalin Marinas 
Signed-off-by: Will Deacon 
Signed-off-by: Ard Biesheuvel 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm64/kvm/hyp/hyp-entry.S |   20 ++--
 1 file changed, 18 insertions(+), 2 deletions(-)

--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -64,10 +65,11 @@ alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
 
cmp x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
@@ -100,6 +102,20 @@ alternative_endif
 
eret
 
+el1_hvc_guest:
+   /*
+* Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+* The workaround has already been applied on the host,
+* so let's quickly get back to the guest. We don't bother
+* restoring x1, as it can be clobbered anyway.
+*/
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+   mov x0, x1
+   add sp, sp, #16
+   eret
+
 el1_trap:
/*
 * x0: ESR_EC




[PATCH 4.14 088/195] [Variant 2/Spectre-v2] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling

2018-02-15 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Marc Zyngier 


Commit f72af90c3783 upstream.

We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified a HVC call
coming from the guest.

Tested-by: Ard Biesheuvel 
Reviewed-by: Christoffer Dall 
Signed-off-by: Marc Zyngier 
Signed-off-by: Catalin Marinas 
Signed-off-by: Will Deacon 
Signed-off-by: Ard Biesheuvel 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm64/kvm/hyp/hyp-entry.S |   20 ++--
 1 file changed, 18 insertions(+), 2 deletions(-)

--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -64,10 +65,11 @@ alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
 
cmp x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
@@ -100,6 +102,20 @@ alternative_endif
 
eret
 
+el1_hvc_guest:
+   /*
+* Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+* The workaround has already been applied on the host,
+* so let's quickly get back to the guest. We don't bother
+* restoring x1, as it can be clobbered anyway.
+*/
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+   mov x0, x1
+   add sp, sp, #16
+   eret
+
 el1_trap:
/*
 * x0: ESR_EC