On 8/22/19 12:57 PM, Alexandru Elisei wrote:
> [..]
> I tried to fix it with the following patch, inject_undef64 was similarly
> broken:
>
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index fac962b467bd..aee8a9ef36d5 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -53,15 +53,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
> {
> unsigned long cpsr = *vcpu_cpsr(vcpu);
> bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
> - u32 esr = 0;
> -
> - vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
> - *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
> -
> - *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> - vcpu_write_spsr(vcpu, cpsr);
> -
> - vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
> + u32 esr = ESR_ELx_FSC_EXTABT;
>
> /*
> * Build an {i,d}abort, depending on the level and the
> @@ -82,13 +74,12 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
> if (!is_iabt)
> esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
>
> - vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
> -}
> + if (nested_virt_in_use(vcpu)) {
> + kvm_inject_nested_sync(vcpu, esr);
> + return;
> + }
>
> -static void inject_undef64(struct kvm_vcpu *vcpu)
> -{
> - unsigned long cpsr = *vcpu_cpsr(vcpu);
> - u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
> + vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
>
> vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
> *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
> @@ -96,6 +87,14 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
> *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> vcpu_write_spsr(vcpu, cpsr);
>
> + vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
> +}
> +
> +static void inject_undef64(struct kvm_vcpu *vcpu)
> +{
> + unsigned long cpsr = *vcpu_cpsr(vcpu);
> + u32 esr = ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT;
> +
> /*
> * Build an unknown exception, depending on the instruction
> * set.
> @@ -103,7 +102,18 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
> if (kvm_vcpu_trap_il_is32bit(vcpu))
> esr |= ESR_ELx_IL;
>
> + if (nested_virt_in_use(vcpu)) {
> + kvm_inject_nested_sync(vcpu, esr);
> + return;
> + }
> +
> vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
> +
> + vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
> + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
> +
> + *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> + vcpu_write_spsr(vcpu, cpsr);
> }
>
> /**
>
Oops, the above is broken for anything running under an L1 guest hypervisor.
Hopefully this is better:
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index fac962b467bd..952e49aeb6f0 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -53,15 +53,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
- u32 esr = 0;
-
- vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
- *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
- *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
- vcpu_write_spsr(vcpu, cpsr);
-
- vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
+ u32 esr = ESR_ELx_FSC_EXTABT;
/*
* Build an {i,d}abort, depending on the level and the
@@ -82,13 +74,12 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
if (!is_iabt)
esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
- vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
-}
+ if (is_hyp_ctxt(vcpu)) {
+ kvm_inject_nested_sync(vcpu, esr);
+ return;
+ }
-static void inject_undef64(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+ vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
@@ -96,6 +87,14 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
vcpu_write_spsr(vcpu, cpsr);
+ vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
+}
+
+static void inject_undef64(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ u32 esr = ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT;
+
/*
* Build an unknown exception, depending on the instruction
* set.
@@ -103,7 +102,18 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
if (kvm_vcpu_trap_il_is32bit(vcpu))
esr |= ESR_ELx_IL;
+ if (is_hyp_ctxt(vcpu)) {
+ kvm_inject_nested_sync(vcpu, esr);
+ return;
+ }
+
vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+
+ vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+ *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+
+ *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ vcpu_write_spsr(vcpu, cpsr);
}
/**
_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm