On Thu, 2015-09-24 at 16:11 +0300, Laurentiu Tudor wrote:
> diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
> index 81bd8a07..1e9fa2a 100644
> --- a/arch/powerpc/kvm/bookehv_interrupts.S
> +++ b/arch/powerpc/kvm/bookehv_interrupts.S
> @@ -62,6 +62,7 @@
>  #define NEED_EMU	0x00000001 /* emulation -- save nv regs */
>  #define NEED_DEAR	0x00000002 /* save faulting DEAR */
>  #define NEED_ESR	0x00000004 /* save faulting ESR */
> +#define NEED_LPER	0x00000008 /* save faulting LPER */
>
> /*
> * On entry:
> @@ -159,6 +160,12 @@
>  	PPC_STL	r9, VCPU_FAULT_DEAR(r4)
>  	.endif
>
> +	/* Only supported on 64-bit cores for now */
> +	.if	\flags & NEED_LPER
> +	mfspr	r7, SPRN_LPER
> +	std	r7, VCPU_FAULT_LPER(r4)
> +	.endif
What's the harm in using PPC_STL anyway?
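For reference, PPC_STL already picks the right store width per build; from memory (check arch/powerpc/include/asm/asm-compat.h), the definitions are roughly:

#ifdef __powerpc64__
#define PPC_STL		stringify_in_c(std)	/* 64-bit: store doubleword */
#else
#define PPC_STL		stringify_in_c(stw)	/* 32-bit: store word */
#endif

so this block wouldn't need touching if a 32-bit core ever grew LRAT support.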
> /*
>  * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
> diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
> index 12d5c67..99ad88a 100644
> --- a/arch/powerpc/kvm/e500_mmu_host.c
> +++ b/arch/powerpc/kvm/e500_mmu_host.c
> @@ -96,6 +96,112 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
>  			stlbe->mas2, stlbe->mas7_3);
> }
>
> +#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_BOOKE_HV)
> +static int lrat_next(void)
> +{
Will anything break by removing the CONFIG_64BIT condition, even if we don't
have a 32-bit target that uses this?
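That is, could the guard just be

#ifdef CONFIG_KVM_BOOKE_HV

or does something in here (the get_paca() usage, say) genuinely not build on the 32-bit KVM_BOOKE_HV targets?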
> +void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn)
> +{
> +	struct kvm_memory_slot *slot;
> +	unsigned long pfn;
> +	unsigned long hva;
> +	struct vm_area_struct *vma;
> +	unsigned long psize;
> +	int tsize;
> +	unsigned long tsize_pages;
> +
> +	slot = gfn_to_memslot(vcpu->kvm, gfn);
> +	if (!slot) {
> +		pr_err_ratelimited("%s: couldn't find memslot for gfn %lx!\n",
> +				   __func__, (long)gfn);
> +		return;
> +	}
> +
> +	hva = slot->userspace_addr;
What if the faulting address is somewhere in the middle of the slot?
Shouldn't you use gfn_to_hva_memslot() like kvmppc_e500_shadow_map()? In
fact there's probably a lot of logic that should be shared between these two
functions.
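i.e. roughly:

	hva = gfn_to_hva_memslot(slot, gfn);

which offsets slot->userspace_addr by (gfn - slot->base_gfn) pages, so a fault anywhere in the slot resolves to the right userspace address.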
> +	down_read(&current->mm->mmap_sem);
> +	vma = find_vma(current->mm, hva);
> +	if (vma && (hva >= vma->vm_start)) {
> +		psize = vma_kernel_pagesize(vma);
What if it's VM_PFNMAP?
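kvmppc_e500_shadow_map() special-cases those; paraphrasing its handling (see e500_mmu_host.c, details from memory):

	if (vma->vm_flags & VM_PFNMAP) {
		/* Physically contiguous region (e.g. /dev/mem) that
		 * bypasses normal page management; the pfn comes from
		 * the mapping itself. */
		pfn = vma->vm_pgoff + ((hva - vma->vm_start) >> PAGE_SHIFT);
	}

vma_kernel_pagesize() would just report PAGE_SIZE for such a vma even when a larger mapping is possible.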
> +	} else {
> +		pr_err_ratelimited("%s: couldn't find virtual memory address for gfn %lx!\n",
> +				   __func__, (long)gfn);
> +		up_read(&current->mm->mmap_sem);
> +		return;
> +	}
> +	up_read(&current->mm->mmap_sem);
> +
> +	pfn = gfn_to_pfn_memslot(slot, gfn);
> +	if (is_error_noslot_pfn(pfn)) {
> +		pr_err_ratelimited("%s: couldn't get real page for gfn %lx!\n",
> +				   __func__, (long)gfn);
> +		return;
> +	}
> +
> +	tsize = __ilog2(psize) - 10;
> +	tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
1UL << ... -- tsize_pages is an unsigned long, and with a plain int the shift overflows for large enough page sizes before the widening assignment.
kvmppc_e500_shadow_map() needs the same fix.
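i.e.:

	tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);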
> +	gfn &= ~(tsize_pages - 1);
> +	pfn &= ~(tsize_pages - 1);
> +
> +	write_host_lrate(tsize, gfn, pfn, vcpu->kvm->arch.lpid, true);
> +
> +	kvm_release_pfn_clean(pfn);
> +}
> +
> +void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu)
> +{
> +	uint32_t mas0, mas1 = 0;
> +	int esel;
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +
> +	/* LRAT does not have a dedicated instruction for invalidation */
> +	for (esel = 0; esel < get_paca()->tcd_ptr->lrat_max; esel++) {
> +		mas0 = MAS0_ATSEL | MAS0_ESEL(esel);
> +		mtspr(SPRN_MAS0, mas0);
> +		asm volatile("isync; tlbre" : : : "memory");
> +		mas1 = mfspr(SPRN_MAS1) & ~MAS1_VALID;
> +		mtspr(SPRN_MAS1, mas1);
> +		asm volatile("isync; tlbwe" : : : "memory");
> +	}
> +	/* Must clear mas8 for other host tlbwe's */
> +	mtspr(SPRN_MAS8, 0);
> +	isync();
> +
> +	local_irq_restore(flags);
> +}
> +#endif /* CONFIG_64BIT && CONFIG_KVM_BOOKE_HV */
> +
> /*
> * Acquire a mas0 with victim hint, as if we just took a TLB miss.
> *
> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
> index cda695d..5856f8f 100644
> --- a/arch/powerpc/kvm/e500mc.c
> +++ b/arch/powerpc/kvm/e500mc.c
> @@ -99,6 +99,10 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
>  	asm volatile("tlbilxlpid");
>  	mtspr(SPRN_MAS5, 0);
>  	local_irq_restore(flags);
> +
> +#ifdef PPC64
> +	kvmppc_lrat_invalidate(&vcpu_e500->vcpu);
> +#endif
Don't you mean CONFIG_PPC64 (or CONFIG_64BIT to be consistent)?
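i.e.:

#ifdef CONFIG_64BIT
	kvmppc_lrat_invalidate(&vcpu_e500->vcpu);
#endif

As posted, PPC64 is never defined anywhere, so the call is silently compiled out.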
> }
>
> void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
> diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
> index 9c90e66..b0da4b9 100644
> --- a/arch/powerpc/mm/fsl_booke_mmu.c
> +++ b/arch/powerpc/mm/fsl_booke_mmu.c
> @@ -194,6 +194,16 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
>  	get_paca()->tcd.esel_next = i;
>  	get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
>  	get_paca()->tcd.esel_first = i;
> +
> +#ifdef CONFIG_KVM_BOOKE_HV
> +	get_paca()->tcd.lrat_next = 0;
> +	if (((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V2) &&
> +	    (mfspr(SPRN_MMUCFG) & MMUCFG_LRAT)) {
> +		get_paca()->tcd.lrat_max = mfspr(SPRN_LRATCFG) & LRATCFG_NENTRY;
> +	} else {
> +		get_paca()->tcd.lrat_max = 0;
> +	}
Unnecessary braces -- both branches are single statements.
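i.e.:

	if (((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V2) &&
	    (mfspr(SPRN_MMUCFG) & MMUCFG_LRAT))
		get_paca()->tcd.lrat_max = mfspr(SPRN_LRATCFG) & LRATCFG_NENTRY;
	else
		get_paca()->tcd.lrat_max = 0;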
-Scott