On 25/06/2019 13:19, Alexandru Elisei wrote:
> On 6/21/19 10:38 AM, Marc Zyngier wrote:
>> From: Christoffer Dall <[email protected]>
>>
>> Add stage 2 mmu data structures for virtual EL2 and for nested guests.
>> We don't yet populate shadow stage 2 page tables, but we now have a
>> framework for getting to a shadow stage 2 pgd.
>>
>> We allocate two stage 2 mmu structures per vcpu, as that is sufficient
>> for each vcpu to run two VMs without having to flush the stage 2 page
>> tables.
>>
>> Signed-off-by: Christoffer Dall <[email protected]>
>> Signed-off-by: Marc Zyngier <[email protected]>
>> ---
>>  arch/arm/include/asm/kvm_host.h     |   4 +
>>  arch/arm/include/asm/kvm_mmu.h      |   3 +
>>  arch/arm64/include/asm/kvm_host.h   |  28 +++++
>>  arch/arm64/include/asm/kvm_mmu.h    |   8 ++
>>  arch/arm64/include/asm/kvm_nested.h |   7 ++
>>  arch/arm64/kvm/nested.c             | 172 ++++++++++++++++++++++++++++
>>  virt/kvm/arm/arm.c                  |  16 ++-
>>  virt/kvm/arm/mmu.c                  |  31 ++---
>>  8 files changed, 254 insertions(+), 15 deletions(-)
>>
>> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
>> index e3217c4ad25b..b821eb2383ad 100644
>> --- a/arch/arm/include/asm/kvm_host.h
>> +++ b/arch/arm/include/asm/kvm_host.h
>> @@ -424,4 +424,8 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
>>      return true;
>>  }
>>  
>> +static inline void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu) {}
>> +static inline void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu) {}
>> +static inline int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu) { return 0; }
>> +
>>  #endif /* __ARM_KVM_HOST_H__ */
>> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
>> index be23e3f8e08c..e6984b6da2ce 100644
>> --- a/arch/arm/include/asm/kvm_mmu.h
>> +++ b/arch/arm/include/asm/kvm_mmu.h
>> @@ -420,6 +420,9 @@ static inline int hyp_map_aux_data(void)
>>  
>>  static inline void kvm_set_ipa_limit(void) {}
>>  
>> +static inline void kvm_init_s2_mmu(struct kvm_s2_mmu *mmu) {}
>> +static inline void kvm_init_nested(struct kvm *kvm) {}
>> +
>>  static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
>>  {
>>      struct kvm_vmid *vmid = &mmu->vmid;
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index 3dee5e17a4ee..cc238de170d2 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -88,11 +88,39 @@ struct kvm_s2_mmu {
>>      phys_addr_t     pgd_phys;
>>  
>>      struct kvm *kvm;
>> +
>> +    /*
>> +     * For a shadow stage-2 MMU, the virtual vttbr programmed by the guest
>> +     * hypervisor.  Unused for kvm_arch->mmu. Set to 1 when the structure
>> +     * contains no valid information.
>> +     */
>> +    u64     vttbr;
>> +
>> +    /* true when this represents a nested context where virtual HCR_EL2.VM == 1 */
>> +    bool    nested_stage2_enabled;
>> +
>> +    /*
>> +     *  0: Nobody is currently using this, check vttbr for validity
>> +     * >0: Somebody is actively using this.
>> +     */
>> +    atomic_t refcnt;
>>  };
>>  
>> +static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
>> +{
>> +    return !(mmu->vttbr & 1);
>> +}
>> +
>>  struct kvm_arch {
>>      struct kvm_s2_mmu mmu;
>>  
>> +    /*
>> +     * Stage 2 paging state for VMs with nested virtualization, using a
>> +     * virtual VMID.
>> +     */
>> +    struct kvm_s2_mmu *nested_mmus;
>> +    size_t nested_mmus_size;
>> +
>>      /* VTCR_EL2 value for this VM */
>>      u64    vtcr;
>>  
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
>> index 1eb6e0ca61c2..32bcaa1845dc 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
>> @@ -100,6 +100,7 @@ alternative_cb_end
>>  #include <asm/mmu_context.h>
>>  #include <asm/pgtable.h>
>>  #include <asm/kvm_emulate.h>
>> +#include <asm/kvm_nested.h>
>>  
>>  void kvm_update_va_mask(struct alt_instr *alt,
>>                      __le32 *origptr, __le32 *updptr, int nr_inst);
>> @@ -164,6 +165,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
>>                           void **haddr);
>>  void free_hyp_pgds(void);
>>  
>> +void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
>>  void stage2_unmap_vm(struct kvm *kvm);
>>  int kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu);
>>  void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
>> @@ -635,5 +637,11 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
>>      asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
>>  }
>>  
>> +static inline u64 get_vmid(u64 vttbr)
>> +{
>> +    return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
>> +            VTTBR_VMID_SHIFT;
>> +}
>> +
>>  #endif /* __ASSEMBLY__ */
>>  #endif /* __ARM64_KVM_MMU_H__ */
>> diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
>> index 61e71d0d2151..d4021d0892bd 100644
>> --- a/arch/arm64/include/asm/kvm_nested.h
>> +++ b/arch/arm64/include/asm/kvm_nested.h
>> @@ -10,6 +10,13 @@ static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
>>              test_bit(KVM_ARM_VCPU_NESTED_VIRT, vcpu->arch.features);
>>  }
>>  
>> +extern void kvm_init_nested(struct kvm *kvm);
>> +extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
>> +extern void kvm_init_s2_mmu(struct kvm_s2_mmu *mmu);
>> +extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr);
>> +extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
>> +extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
>> +
>>  int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
>>  extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
>>  extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
>> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
>> index 3872e3cf1691..4b38dc5c0be3 100644
>> --- a/arch/arm64/kvm/nested.c
>> +++ b/arch/arm64/kvm/nested.c
>> @@ -18,7 +18,161 @@
>>  #include <linux/kvm.h>
>>  #include <linux/kvm_host.h>
>>  
>> +#include <asm/kvm_arm.h>
>>  #include <asm/kvm_emulate.h>
>> +#include <asm/kvm_mmu.h>
>> +#include <asm/kvm_nested.h>
>> +
>> +void kvm_init_nested(struct kvm *kvm)
>> +{
>> +    kvm_init_s2_mmu(&kvm->arch.mmu);
>> +
>> +    kvm->arch.nested_mmus = NULL;
>> +    kvm->arch.nested_mmus_size = 0;
>> +}
>> +
>> +int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
>> +{
>> +    struct kvm *kvm = vcpu->kvm;
>> +    struct kvm_s2_mmu *tmp;
>> +    int num_mmus;
>> +    int ret = -ENOMEM;
>> +
>> +    if (!test_bit(KVM_ARM_VCPU_NESTED_VIRT, vcpu->arch.features))
>> +            return 0;
>> +
>> +    if (!cpus_have_const_cap(ARM64_HAS_NESTED_VIRT))
>> +            return -EINVAL;
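
For context, the rest of the function is trimmed from the quote at this
point. Going by the commit message (two stage 2 mmu structures per vcpu)
and the declarations already quoted (tmp, num_mmus, ret, and
kvm_alloc_stage2_pgd() from the kvm_mmu.h hunk), one plausible shape for
the allocation is the sketch below; this is a reconstruction, not the
actual patch body:

	/*
	 * Sketch only: grow the per-VM array to two shadow MMUs per
	 * vcpu and give the two new entries their own stage 2 pgd.
	 */
	num_mmus = atomic_read(&kvm->online_vcpus) * 2;
	tmp = krealloc(kvm->arch.nested_mmus,
		       num_mmus * sizeof(*tmp), GFP_KERNEL | __GFP_ZERO);
	if (!tmp)
		return ret;	/* ret was initialised to -ENOMEM */

	kvm->arch.nested_mmus = tmp;

	if (!kvm_alloc_stage2_pgd(&tmp[num_mmus - 1]) &&
	    !kvm_alloc_stage2_pgd(&tmp[num_mmus - 2])) {
		kvm->arch.nested_mmus_size = num_mmus;
		ret = 0;
	}

	return ret;
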
> 
> Here we fail if the KVM_ARM_VCPU_NESTED_VIRT feature was requested for the
> vcpu, but the nested capability isn't present. This function is called as a
> result of the KVM_ARM_VCPU_INIT ioctl, and when this function fails, the
> KVM_ARM_VCPU_INIT ioctl will also fail. This means that we cannot have a vcpu
> with the nested virt feature on a system which doesn't support nested
> virtualization.
> 
> However, patch 04/59 "KVM: arm64: nv: Introduce nested virtualization VCPU
> feature" added the function nested_virt_in_use() (in
> arch/arm64/include/asm/kvm_nested.h), which checks *both* conditions before
> returning true. I believe the capability check is not required in
> nested_virt_in_use().
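
For reference, reconstructing from the hunk context quoted above (both
the cpus_have_const_cap() line and the test_bit() line are visible
there), the function in question presumably reads:

	static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
	{
		return cpus_have_const_cap(ARM64_HAS_NESTED_VIRT) &&
			test_bit(KVM_ARM_VCPU_NESTED_VIRT, vcpu->arch.features);
	}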

The capability check is a static branch, meaning that there is no
actual evaluation, just a branch instruction that gets patched at boot
time. If you remove this check, you end up testing the feature bit even
on systems that do not have NV, and that'd be a measurable performance
drag.
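
To illustrate why, here is the generic jump-label pattern that backs
such checks (a conceptual sketch, not the actual arm64 cpucap plumbing;
nv_key and nv_in_use() are made-up names):

	#include <linux/jump_label.h>

	/* Patched at boot into a single nop or branch; no memory load. */
	static DEFINE_STATIC_KEY_FALSE(nv_key);

	static bool nv_in_use(struct kvm_vcpu *vcpu)
	{
		/*
		 * On !NV hardware the static branch falls straight
		 * through, so the feature-bit load below never runs.
		 */
		return static_branch_unlikely(&nv_key) &&
		       test_bit(KVM_ARM_VCPU_NESTED_VIRT, vcpu->arch.features);
	}

With the capability check dropped, every caller would instead perform
the test_bit() load and mask on each invocation.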

Thanks,

        M.
-- 
Jazz is not dead. It just smells funny...