> diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
> index efe0f33..8e82772 100644
> --- a/arch/powerpc/mm/tlb_low_64e.S
> +++ b/arch/powerpc/mm/tlb_low_64e.S
> @@ -232,6 +232,173 @@ itlb_miss_fault_bolted:
>       beq     tlb_miss_common_bolted
>       b       itlb_miss_kernel_bolted
> 
> +/*
> + * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
> + *
> + * Linear mapping is bolted: no virtual page table or nested TLB misses
> + * Indirect entries in TLB1, hardware loads resulting direct entries
> + *    into TLB0
> + * No HES or NV hint on TLB1, so we need to do software round-robin
> + * No tlbsrx. so we need a spinlock, and we have to deal
> + *    with MAS-damage caused by tlbsx
> + * 4K pages only
> + */
> +
> +     START_EXCEPTION(instruction_tlb_miss_e6500)
> +     tlb_prolog_bolted SPRN_SRR0
> +
> +     ld      r11,PACA_TLB_PER_CORE_PTR(r13)
> +     srdi.   r15,r16,60              /* get region */
> +     ori     r16,r16,1
> +
> +     TLB_MISS_STATS_SAVE_INFO_BOLTED
> +     bne     tlb_miss_kernel_e6500   /* user/kernel test */
> +
> +     b       tlb_miss_common_e6500
> +
> +     START_EXCEPTION(data_tlb_miss_e6500)
> +     tlb_prolog_bolted SPRN_DEAR
> +
> +     ld      r11,PACA_TLB_PER_CORE_PTR(r13)
> +     srdi.   r15,r16,60              /* get region */
> +     rldicr  r16,r16,0,62
> +
> +     TLB_MISS_STATS_SAVE_INFO_BOLTED
> +     bne     tlb_miss_kernel_e6500   /* user vs kernel check */
> +

This ends up calling the DO_KVM macro twice with the same parameters, which
generates the following compile errors:

 arch/powerpc/mm/tlb_low_64e.S:307: Error: symbol `kvmppc_resume_14_0x01B' is already defined
 arch/powerpc/mm/tlb_low_64e.S:319: Error: symbol `kvmppc_resume_13_0x01B' is already defined
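
For illustration, here is a stripped-down sketch of what goes wrong
(DO_KVM_SKETCH is a hypothetical stand-in, not the real DO_KVM): a macro
that emits a label named after its arguments can only be expanded once per
(intno, srr1) pair, and the new e6500 handlers end up expanding it a second
time with the same arguments:

        /* sketch only: hypothetical DO_KVM_SKETCH, not the kernel's DO_KVM */
        .macro  DO_KVM_SKETCH intno srr1
        b       kvmppc_handler_\intno\()_\srr1
kvmppc_resume_\intno\()_\srr1:
        .endm

        DO_KVM_SKETCH 13 0x01B  /* defines kvmppc_resume_13_0x01B */
        DO_KVM_SKETCH 13 0x01B  /* same args again: symbol already defined */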

We can live with it if we patch DO_KVM like this:

diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index 4610fb0..029ecab 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -55,9 +55,9 @@
 #ifdef CONFIG_KVM_BOOKE_HV
 BEGIN_FTR_SECTION
        mtocrf  0x80, r11       /* check MSR[GS] without clobbering reg */
-       bf      3, kvmppc_resume_\intno\()_\srr1
+       bf      3, 1f
        b       kvmppc_handler_\intno\()_\srr1
-kvmppc_resume_\intno\()_\srr1:
+1:
 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 #endif
 .endm
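
With the local label each expansion gets its own "1:", so repeating the
sequence with the same arguments assembles cleanly. A minimal sketch of the
patched form (again with the hypothetical DO_KVM_SKETCH stand-in):

        /* sketch only: hypothetical DO_KVM_SKETCH, not the kernel's DO_KVM */
        .macro  DO_KVM_SKETCH intno srr1
        mtocrf  0x80, r11       /* check MSR[GS] without clobbering reg */
        bf      3, 1f
        b       kvmppc_handler_\intno\()_\srr1
1:
        .endm

        DO_KVM_SKETCH 13 0x01B  /* first expansion */
        DO_KVM_SKETCH 13 0x01B  /* second expansion: no clash now */

Local numeric labels can be defined any number of times, and "1f" always
binds to the next definition in the output stream, which is what makes the
macro safe to expand repeatedly.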

-Mike
