I'm abandoning this patch; I will send a v2 with a minor fix for 85xx.

Mike

> -----Original Message-----
> From: Mihai Caraman [mailto:mihai.cara...@freescale.com]
> Sent: Friday, August 29, 2014 8:04 PM
> To: kvm-ppc@vger.kernel.org
> Cc: k...@vger.kernel.org; Caraman Mihai Claudiu-B02008
> Subject: [PATCH 1/2] KVM: PPC: e500mc: Add support for single threaded
> vcpus on e6500 core
> 
> ePAPR represents hardware threads as cpu node properties in the device
> tree. So with existing QEMU, hardware threads are simply exposed as
> vcpus, each with one hardware thread.
> 
> The e6500 core shares TLBs between hardware threads. Without a TLB write
> conditional instruction, the Linux kernel uses per-core mechanisms to
> protect against duplicate TLB entries.
> 
> The guest is unable to detect real sibling threads, so it can't use such
> a per-core TLB protection mechanism. An alternative solution is to have
> the hypervisor allocate different lpids to the guest's vcpus running
> simultaneously on real sibling threads. On systems with two threads per
> core, this patch halves the size of the lpid pool that the allocator sees
> and uses two lpids per VM. Even numbers are used to speed up vcpu lpid
> computation with consecutive lpids per VM: vm1 will use lpids 2 and 3,
> vm2 lpids 4 and 5, and so on.
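> 
> For illustration only (not part of the patch), here is a minimal
> standalone sketch of that numbering; vm_base_lpid, vcpu_lpid,
> allocator_id and thread_id are made-up names, with thread_id standing in
> for smp_processor_id() & 1:
> 
> 	/*
> 	 * Hypothetical sketch, not kernel code: the allocator hands out one
> 	 * id per VM, the VM owns the even/odd pair (2*id, 2*id + 1), and a
> 	 * vcpu running on hardware thread t uses lpid 2*id + t.
> 	 */
> 	static inline int vm_base_lpid(int allocator_id)
> 	{
> 		return allocator_id << 1;	/* vm1 -> 2, vm2 -> 4, ... */
> 	}
> 
> 	static inline int vcpu_lpid(int allocator_id, int thread_id)
> 	{
> 		return vm_base_lpid(allocator_id) | (thread_id & 1);
> 	}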
> 
> Signed-off-by: Mihai Caraman <mihai.cara...@freescale.com>
> ---
>  arch/powerpc/include/asm/kvm_booke.h |  5 +++-
>  arch/powerpc/kvm/e500.h              | 20 ++++++++++++++++
>  arch/powerpc/kvm/e500_mmu_host.c     | 16 ++++++-------
>  arch/powerpc/kvm/e500mc.c            | 46 ++++++++++++++++++++++++++----------
>  4 files changed, 64 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
> index f7aa5cc..630134d 100644
> --- a/arch/powerpc/include/asm/kvm_booke.h
> +++ b/arch/powerpc/include/asm/kvm_booke.h
> @@ -23,7 +23,10 @@
>  #include <linux/types.h>
>  #include <linux/kvm_host.h>
> 
> -/* LPIDs we support with this build -- runtime limit may be lower */
> +/*
> + * Number of available lpids. Only the low-order 6 bits of the LPID register are
> + * implemented on e500mc+ cores.
> + */
>  #define KVMPPC_NR_LPIDS                        64
> 
>  #define KVMPPC_INST_EHPRIV           0x7c00021c
> diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
> index a326178..7b74453 100644
> --- a/arch/powerpc/kvm/e500.h
> +++ b/arch/powerpc/kvm/e500.h
> @@ -22,6 +22,7 @@
>  #include <linux/kvm_host.h>
>  #include <asm/mmu-book3e.h>
>  #include <asm/tlb.h>
> +#include <asm/cputhreads.h>
> 
>  enum vcpu_ftr {
>       VCPU_FTR_MMU_V2
> @@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
>  #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
>  #define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
>  #define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)
> +
> +/*
> + * This function should be called with preemption disabled
> + * and the returned value is valid only in that context
> + */
> +static inline int get_thread_specific_lpid(int vm_lpid)
> +{
> +     int vcpu_lpid = vm_lpid;
> +
> +     if (threads_per_core == 2)
> +             vcpu_lpid |= smp_processor_id() & 1;
> +
> +     return vcpu_lpid;
> +}
> +
> +static inline int get_lpid(struct kvm_vcpu *vcpu)
> +{
> +     return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
> +}
>  #else
>  unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
>                                     struct kvm_book3e_206_tlb_entry *gtlbe);
> diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
> index 08f14bb..5759608 100644
> --- a/arch/powerpc/kvm/e500_mmu_host.c
> +++ b/arch/powerpc/kvm/e500_mmu_host.c
> @@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
>   * writing shadow tlb entry to host TLB
>   */
>  static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
> -                                  uint32_t mas0)
> +                                  uint32_t mas0,
> +                                  uint32_t lpid)
>  {
>       unsigned long flags;
> 
> @@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
>       mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
>       mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
>  #ifdef CONFIG_KVM_BOOKE_HV
> -     mtspr(SPRN_MAS8, stlbe->mas8);
> +     mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
>  #endif
>       asm volatile("isync; tlbwe" : : : "memory");
> 
> @@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
> 
>       if (tlbsel == 0) {
>               mas0 = get_host_mas0(stlbe->mas2);
> -             __write_host_tlbe(stlbe, mas0);
> +             __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
>       } else {
>               __write_host_tlbe(stlbe,
>                                 MAS0_TLBSEL(1) |
> -                               MAS0_ESEL(to_htlb1_esel(sesel)));
> +                               MAS0_ESEL(to_htlb1_esel(sesel)),
> +                               vcpu_e500->vcpu.kvm->arch.lpid);
>       }
>  }
> 
> @@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
>       stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
>       stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
>                       e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
> -
> -#ifdef CONFIG_KVM_BOOKE_HV
> -     stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
> -#endif
>  }
> 
>  static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
> @@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
> 
>       local_irq_save(flags);
>       mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
> -     mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
> +     mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
>       asm volatile("tlbsx 0, %[geaddr]\n" : :
>                    [geaddr] "r" (geaddr));
>       mtspr(SPRN_MAS5, 0);
> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
> index 4549349..bf8f99f 100644
> --- a/arch/powerpc/kvm/e500mc.c
> +++ b/arch/powerpc/kvm/e500mc.c
> @@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
>               return;
>       }
> 
> -
> -     tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
> +     preempt_disable();
> +     tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
>       mb();
>       ppc_msgsnd(dbell_type, 0, tag);
> +     preempt_enable();
>  }
> 
>  /* gtlbe must not be mapped by more than one host tlb entry */
> @@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
>  {
>       unsigned int tid, ts;
>       gva_t eaddr;
> -     u32 val, lpid;
> +     u32 val;
>       unsigned long flags;
> 
>       ts = get_tlb_ts(gtlbe);
>       tid = get_tlb_tid(gtlbe);
> -     lpid = vcpu_e500->vcpu.kvm->arch.lpid;
> 
>       /* We search the host TLB to invalidate its shadow TLB entry */
>       val = (tid << 16) | ts;
> @@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
>       local_irq_save(flags);
> 
>       mtspr(SPRN_MAS6, val);
> -     mtspr(SPRN_MAS5, MAS5_SGS | lpid);
> +     mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
> 
>       asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
>       val = mfspr(SPRN_MAS1);
> @@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
>       unsigned long flags;
> 
>       local_irq_save(flags);
> -     mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
> +     mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
>       asm volatile("tlbilxlpid");
>       mtspr(SPRN_MAS5, 0);
>       local_irq_restore(flags);
> @@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
>  {
>  }
> 
> +/* We use two lpids per VM */
>  static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
> 
>  static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
> @@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
> 
>       kvmppc_booke_vcpu_load(vcpu, cpu);
> 
> -     mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
> +     mtspr(SPRN_LPID, get_lpid(vcpu));
>       mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
>       mtspr(SPRN_GPIR, vcpu->vcpu_id);
>       mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
> +     vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
> +     vcpu->arch.epsc = vcpu->arch.eplc;
>       mtspr(SPRN_EPLC, vcpu->arch.eplc);
>       mtspr(SPRN_EPSC, vcpu->arch.epsc);
> 
> @@ -141,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
>       mtspr(SPRN_GESR, vcpu->arch.shared->esr);
> 
>       if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
> -         __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
> +         __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
>               kvmppc_e500_tlbil_all(vcpu_e500);
> -             __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
> +             __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
>       }
>  }
> 
> @@ -193,8 +196,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
>       vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
>  #endif
>       vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
> -     vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
> -     vcpu->arch.epsc = vcpu->arch.eplc;
> 
>       vcpu->arch.pvr = mfspr(SPRN_PVR);
>       vcpu_e500->svr = mfspr(SPRN_SVR);
> @@ -354,13 +355,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
>       if (lpid < 0)
>               return lpid;
> 
> +     /*
> +      * Use two lpids per VM on cores with two threads, like e6500. Use
> +      * even numbers to speed up vcpu lpid computation with consecutive lpids
> +      * per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
> +      */
> +     if (threads_per_core == 2)
> +             lpid <<= 1;
> +
>       kvm->arch.lpid = lpid;
>       return 0;
>  }
> 
>  static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
>  {
> -     kvmppc_free_lpid(kvm->arch.lpid);
> +     int lpid = kvm->arch.lpid;
> +
> +     if (threads_per_core == 2)
> +             lpid >>= 1;
> +
> +     kvmppc_free_lpid(lpid);
>  }
> 
>  static struct kvmppc_ops kvm_ops_e500mc = {
> @@ -388,7 +402,13 @@ static int __init kvmppc_e500mc_init(void)
>       if (r)
>               goto err_out;
> 
> -     kvmppc_init_lpid(64);
> +     /*
> +      * Use two lpids per VM on dual-threaded processors like e6500
> +      * to work around the lack of a TLB write conditional instruction.
> +      * Expose half the number of available hardware lpids to the lpid
> +      * allocator.
> +      */
> +     kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
>       kvmppc_claim_lpid(0); /* host */
> 
>       r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
> --
> 1.7.11.7
