Re: [PATCH v2 14/21] arm64: KVM: HYP mode entry points

2015-12-02 Thread Christoffer Dall
On Fri, Nov 27, 2015 at 06:50:08PM +0000, Marc Zyngier wrote:
> Add the entry points for HYP mode (both for hypercalls and
> exception handling).
> 
> Signed-off-by: Marc Zyngier 

Reviewed-by: Christoffer Dall 


[PATCH v2 14/21] arm64: KVM: HYP mode entry points

2015-11-27 Thread Marc Zyngier
Add the entry points for HYP mode (both for hypercalls and
exception handling).

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile    |   1 +
 arch/arm64/kvm/hyp/hyp-entry.S | 198 +
 2 files changed, 199 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/hyp-entry.S
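
For context, the host reaches el1_sync through kvm_call_hyp(): it traps to
EL2 with the HYP function's kernel address in x0 and up to three arguments
in x1-x3, while x0 == 0 instead asks for VBAR_EL2 (the __hyp_get_vectors
case). A minimal, illustrative call site (not part of this patch), assuming
the existing kvm_call_hyp() helper and the __kvm_tlb_flush_vmid() HYP
function from elsewhere in this series:

#include <linux/kvm_host.h>	/* struct kvm */
#include <asm/kvm_asm.h>	/* __kvm_tlb_flush_vmid() */
#include <asm/kvm_host.h>	/* kvm_call_hyp() */

/* Hypothetical caller: run __kvm_tlb_flush_vmid() at EL2 for this VM. */
static void example_tlb_flush(struct kvm *kvm)
{
	/*
	 * HVC from EL1 with x0 = __kvm_tlb_flush_vmid (a kernel VA) and
	 * x1 = kvm; el1_sync converts x0 with kern_hyp_va, shuffles
	 * x1-x3 down to x0-x2 and branches to the function.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}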

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 1a529f5..826032b 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
new file mode 100644
index 0000000..8334407
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+el1_sync:				// Guest trapped into EL2
+	push	x0, x1
+	push	x2, x3
+
+	mrs	x1, esr_el2
+	lsr	x2, x1, #ESR_ELx_EC_SHIFT
+
+	cmp	x2, #ESR_ELx_EC_HVC64
+	b.ne	el1_trap
+
+	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
+	cbnz	x3, el1_trap		// called HVC
+
+	/* Here, we're pretty sure the host called HVC. */
+	pop	x2, x3
+	pop	x0, x1
+
+	/* Check for __hyp_get_vectors */
+	cbnz	x0, 1f
+	mrs	x0, vbar_el2
+	b	2f
+
+1:	push	lr, xzr
+
+	/*
+	 * Compute the function address in EL2, and shuffle the parameters.
+	 */
+	kern_hyp_va	x0
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+
+	pop	lr, xzr
+2:	eret
+
+el1_trap:
+	/*
+	 * x1: ESR
+	 * x2: ESR_EC
+	 */
+
+	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
+	cmp	x2, #ESR_ELx_EC_FP_ASIMD
+	b.eq	__fpsimd_guest_restore
+
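+	/*
+	 * The cmp/ccmp pair below accepts either kind of lower-EL abort:
+	 * when the first compare is "ne" (not a data abort), ccmp
+	 * re-compares the EC against IABT_LOW; otherwise it forces NZCV
+	 * to #4 (Z set). The b.ne therefore only skips ahead when this is
+	 * neither a data nor an instruction abort from a lower EL.
+	 */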
+	cmp	x2, #ESR_ELx_EC_DABT_LOW
+	mov	x0, #ESR_ELx_EC_IABT_LOW
+	ccmp	x2, x0, #4, ne
+	b.ne	1f			// Not an abort we care about
+
+	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
+	and	x2, x1, #ESR_ELx_FSC_TYPE
+	cmp	x2, #FSC_PERM
+	b.ne	1f			// Not a permission fault
+alternative_else
+	nop				// Use the permission fault path to
+	nop				// check for a valid S1 translation,
+	nop				// regardless of the ESR value.
+alternative_endif
+
+	/*
+	 * Check for Stage-1 page table walk, which is guaranteed
+	 * to give a valid HPFAR_EL2.
+	 */
+	tbnz	x1, #7, 1f		// S1PTW is set
+
+	/* Preserve PAR_EL1 */
+	mrs	x3, par_el1
+	push	x3, xzr
+
+	/*
+	 * Permission fault, HPFAR_EL2 is invalid.
+	 * Resolve the IPA the hard way using the guest VA.
+	 * Stage-1 translation already validated the memory access rights.
+	 * As such, we can use the EL1 translation regime, and don't have
+	 * to distinguish between EL0 and EL1 access.
+	 */
+	mrs	x2, far_el2
+	at	s1e1r, x2
+	isb
+
+	/* Read result */
+	mrs	x3, par_el1
+	pop	x0, xzr			// Restore PAR_EL1 from the stack
+	msr	par_el1, x0
+	tbnz	x3, #0, 3f		// Bail out if we failed the translation
+	ubfx	x3, x3, #12, #36	// Extract IPA
+	lsl	x3, x3, #4		// and present it like HPFAR
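+	/*
+	 * PAR_EL1 returns the output address in bits [47:12], whereas
+	 * HPFAR_EL2 carries the faulting IPA's bits [47:12] in FIPA,
+	 * bits [39:4]. The ubfx/lsl pair above simply moves the field
+	 * from one layout to the other.
+	 */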
+	b	2f
+
+1:	mrs	x3, hpfar_el2
+	mrs	x2, far_el2
+
+2:	mrs	x0, tpidr_el2
+	str	w1, [x0, #VCPU_ESR_EL2]
+	str	x2, [x0, #VCPU_FAR_EL2]
+	str	x3, [x0, #VCPU_HPFAR_EL2]
+
+	mov	x1, #ARM_EXCEPTION_TRAP
+	b	__guest_exit
+
+	/*
+	 * Translation failed. Just return to the guest and
+	 * let it fault again. Another CPU is probably playing