Re: [PATCH 26/26] KVM: arm64: Parametrize exception entry with a target EL

2020-05-27 Thread Mark Rutland
On Wed, May 27, 2020 at 10:34:09AM +0100, Marc Zyngier wrote:
> Hi Mark,
> 
> On 2020-05-19 11:44, Mark Rutland wrote:
> > On Wed, Apr 22, 2020 at 01:00:50PM +0100, Marc Zyngier wrote:
> > > -static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
> > > +static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
> > > +			    enum exception_type type)
> > 
> > Since this is all for an AArch64 target, could we keep `64` in the name,
> > e.g enter_exception64? That'd mirror the callers below.
> > 
> > >  {
> > > - unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
> > > - unsigned long old, new;
> > > + unsigned long sctlr, vbar, old, new, mode;
> > > + u64 exc_offset;
> > > +
> > > + mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
> > > +
> > > + if  (mode == target_mode)
> > > + exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> > > + else if ((mode | 1) == target_mode)
> > > + exc_offset = CURRENT_EL_SP_EL0_VECTOR;
> > 
> > It would be nice if we could add a mnemonic for the `1` here, e.g.
> > PSR_MODE_SP0 or PSR_MODE_THREAD_BIT.
> 
> I've addressed both comments as follows:
> 
> diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
> index bf57308fcd63..953b6a1ce549 100644
> --- a/arch/arm64/include/asm/ptrace.h
> +++ b/arch/arm64/include/asm/ptrace.h
> @@ -35,6 +35,7 @@
>  #define GIC_PRIO_PSR_I_SET   (1 << 4)
> 
>  /* Additional SPSR bits not exposed in the UABI */
> +#define PSR_MODE_THREAD_BIT  (1 << 0)
>  #define PSR_IL_BIT   (1 << 20)
> 
>  /* AArch32-specific ptrace requests */
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index 3dbcbc839b9c..ebfdfc27b2bd 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -43,8 +43,8 @@ enum exception_type {
>   * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
>   * MSB to LSB.
>   */
> -static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
> -			    enum exception_type type)
> +static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
> +			      enum exception_type type)
>  {
>   unsigned long sctlr, vbar, old, new, mode;
>   u64 exc_offset;
> @@ -53,7 +53,7 @@ static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
> 
>   if  (mode == target_mode)
>   exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> - else if ((mode | 1) == target_mode)
> + else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
>   exc_offset = CURRENT_EL_SP_EL0_VECTOR;
>   else if (!(mode & PSR_MODE32_BIT))
>   exc_offset = LOWER_EL_AArch64_VECTOR;
> @@ -126,7 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>   bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
>   u32 esr = 0;
> 
> - enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
> + enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
> 
>   vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
> 
> @@ -156,7 +156,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>  {
>   u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
> 
> - enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
> + enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
> 
>   /*
>	 * Build an unknown exception, depending on the instruction

Thanks; that all looks good to me, and my R-b stands!

Mark.
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 26/26] KVM: arm64: Parametrize exception entry with a target EL

2020-05-19 Thread Mark Rutland
On Wed, Apr 22, 2020 at 01:00:50PM +0100, Marc Zyngier wrote:
> We currently assume that an exception is delivered to EL1, always.
> Once we emulate EL2, this will no longer be the case. To prepare
> for this, add a target_mode parameter.
> 
> While we're at it, merge the computing of the target PC and PSTATE into
> a single function that updates both PC and CPSR after saving their
> previous values in the corresponding ELR/SPSR. This ensures that they
> are updated in the correct order (a pretty common source of bugs...).
> 
> Signed-off-by: Marc Zyngier 
> ---
>  arch/arm64/kvm/inject_fault.c | 75 ++-
>  1 file changed, 38 insertions(+), 37 deletions(-)
> 
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index d3ebf8bca4b89..3dbcbc839b9c3 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -26,28 +26,12 @@ enum exception_type {
>   except_type_serror  = 0x180,
>  };
>  
> -static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
> -{
> - u64 exc_offset;
> -
> - switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
> - case PSR_MODE_EL1t:
> - exc_offset = CURRENT_EL_SP_EL0_VECTOR;
> - break;
> - case PSR_MODE_EL1h:
> - exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> - break;
> - case PSR_MODE_EL0t:
> - exc_offset = LOWER_EL_AArch64_VECTOR;
> - break;
> - default:
> - exc_offset = LOWER_EL_AArch32_VECTOR;
> - }
> -
> - return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
> -}
> -
>  /*
> + * This performs the exception entry at a given EL (@target_mode), stashing PC
> + * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
> + * The EL passed to this function *must* be a non-secure, privileged mode with
> + * bit 0 being set (PSTATE.SP == 1).
> + *
>   * When an exception is taken, most PSTATE fields are left unchanged in the
>   * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
>   * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
> @@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
>   * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
>   * MSB to LSB.
>   */
> -static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
> +static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
> + enum exception_type type)

Since this is all for an AArch64 target, could we keep `64` in the name,
e.g enter_exception64? That'd mirror the callers below.

>  {
> - unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
> - unsigned long old, new;
> + unsigned long sctlr, vbar, old, new, mode;
> + u64 exc_offset;
> +
> + mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
> +
> + if  (mode == target_mode)
> + exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> + else if ((mode | 1) == target_mode)
> + exc_offset = CURRENT_EL_SP_EL0_VECTOR;

It would be nice if we could add a mnemonic for the `1` here, e.g.
PSR_MODE_SP0 or PSR_MODE_THREAD_BIT.

> + else if (!(mode & PSR_MODE32_BIT))
> + exc_offset = LOWER_EL_AArch64_VECTOR;
> + else
> + exc_offset = LOWER_EL_AArch32_VECTOR;

Other than the above, I couldn't think of a nicer way of writing this,
and AFAICT this is correct.
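
(For reference, the arithmetic works because of the M[4:0] encodings:
PSR_MODE_EL0t is 0x0, PSR_MODE_EL1t is 0x4 and PSR_MODE_EL1h is 0x5, so
bit 0 selects SP_ELx over SP_EL0 at the same EL. A minimal standalone C
sketch of the selection logic -- hypothetical demo code, only the
PSR_MODE_* constants come from the UAPI header:

	#include <stdio.h>

	/* Mode encodings from arch/arm64/include/uapi/asm/ptrace.h */
	#define PSR_MODE_EL0t		0x00000000
	#define PSR_MODE_EL1t		0x00000004
	#define PSR_MODE_EL1h		0x00000005
	#define PSR_MODE_THREAD_BIT	(1 << 0)

	/* Mirrors the vector-offset selection quoted above */
	static const char *vector_for(unsigned long mode, unsigned long target)
	{
		if (mode == target)
			return "CURRENT_EL_SP_ELx_VECTOR";
		else if ((mode | PSR_MODE_THREAD_BIT) == target)
			return "CURRENT_EL_SP_EL0_VECTOR";
		else
			return "LOWER_EL vector";
	}

	int main(void)
	{
		/* Taking an exception to EL1h from each source mode: */
		printf("%s\n", vector_for(PSR_MODE_EL1h, PSR_MODE_EL1h));
		printf("%s\n", vector_for(PSR_MODE_EL1t, PSR_MODE_EL1h));
		printf("%s\n", vector_for(PSR_MODE_EL0t, PSR_MODE_EL1h));
		return 0;
	}

which prints the SP_ELx, SP_EL0 and lower-EL cases in that order.)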

> +
> + switch (target_mode) {
> + case PSR_MODE_EL1h:
> + vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
> + sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
> + vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
> + break;
> + default:
> + /* Don't do that */
> + BUG();
> + }
> +
> + *vcpu_pc(vcpu) = vbar + exc_offset + type;
>  
>   old = *vcpu_cpsr(vcpu);
>   new = 0;
> @@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
>   new |= PSR_I_BIT;
>   new |= PSR_F_BIT;
>  
> - new |= PSR_MODE_EL1h;
> + new |= target_mode;

As a heads-up, some of the other bits will need to change for an EL2
target (e.g. SPAN will depend on HCR_EL2.E2H), but as-is this is
fine.
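
To make that concrete, a hypothetical EL2h arm of the target_mode switch
could look like the sketch below once EL2 emulation exists (assumed here:
VBAR_EL2/SCTLR_EL2/ELR_EL2 entries in the vcpu sysreg file; additionally,
the SPAN computation would need to consult HCR_EL2.E2H, which this sketch
does not show):

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	case PSR_MODE_EL2h:
		/* Hypothetical: needs the emulated EL2 register file */
		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL2);
		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
		vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL2);
		break;
	default:
		/* Don't do that */
		BUG();
	}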

Regardless of the above comments:

Reviewed-by: Mark Rutland 

Mark.
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 26/26] KVM: arm64: Parametrize exception entry with a target EL

2020-04-22 Thread Marc Zyngier
We currently assume that an exception is delivered to EL1, always.
Once we emulate EL2, this will no longer be the case. To prepare
for this, add a target_mode parameter.

While we're at it, merge the computing of the target PC and PSTATE into
a single function that updates both PC and CPSR after saving their
previous values in the corresponding ELR/SPSR. This ensures that they
are updated in the correct order (a pretty common source of bugs...).
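
To illustrate the ordering hazard (a sketch using the helpers from this
patch, not part of the patch itself):

	/* Buggy order: PC is redirected before it is stashed, so ELR_EL1
	 * ends up holding the vector address instead of the old PC.
	 */
	*vcpu_pc(vcpu) = vbar + exc_offset + type;
	vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);

	/* Correct order, as done below: stash the old PC, then redirect. */
	vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
	*vcpu_pc(vcpu) = vbar + exc_offset + type;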

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/inject_fault.c | 75 ++-
 1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index d3ebf8bca4b89..3dbcbc839b9c3 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -26,28 +26,12 @@ enum exception_type {
except_type_serror  = 0x180,
 };
 
-static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
-{
-   u64 exc_offset;
-
-   switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
-   case PSR_MODE_EL1t:
-   exc_offset = CURRENT_EL_SP_EL0_VECTOR;
-   break;
-   case PSR_MODE_EL1h:
-   exc_offset = CURRENT_EL_SP_ELx_VECTOR;
-   break;
-   case PSR_MODE_EL0t:
-   exc_offset = LOWER_EL_AArch64_VECTOR;
-   break;
-   default:
-   exc_offset = LOWER_EL_AArch32_VECTOR;
-   }
-
-   return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
-}
-
 /*
+ * This performs the exception entry at a given EL (@target_mode), stashing PC
+ * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
+ * The EL passed to this function *must* be a non-secure, privileged mode with
+ * bit 0 being set (PSTATE.SP == 1).
+ *
  * When an exception is taken, most PSTATE fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  * MSB to LSB.
  */
-static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
+   enum exception_type type)
 {
-   unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-   unsigned long old, new;
+   unsigned long sctlr, vbar, old, new, mode;
+   u64 exc_offset;
+
+   mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+   if  (mode == target_mode)
+   exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+   else if ((mode | 1) == target_mode)
+   exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+   else if (!(mode & PSR_MODE32_BIT))
+   exc_offset = LOWER_EL_AArch64_VECTOR;
+   else
+   exc_offset = LOWER_EL_AArch32_VECTOR;
+
+   switch (target_mode) {
+   case PSR_MODE_EL1h:
+   vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
+   sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+   vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
+   break;
+   default:
+   /* Don't do that */
+   BUG();
+   }
+
+   *vcpu_pc(vcpu) = vbar + exc_offset + type;
 
old = *vcpu_cpsr(vcpu);
new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
new |= PSR_I_BIT;
new |= PSR_F_BIT;
 
-   new |= PSR_MODE_EL1h;
+   new |= target_mode;
 
-   return new;
+   *vcpu_cpsr(vcpu) = new;
+   vcpu_write_spsr(vcpu, old);
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u32 esr = 0;
 
-   vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
-   *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-   *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-   vcpu_write_spsr(vcpu, cpsr);
+   enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
 
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-   unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-   vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
-   *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-   *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-   vcpu_write_spsr(vcpu, cpsr);
+   enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
 
/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */