The shadow id mapping is based on Hollis's idea.

We can benefit a lot from this trick:

1. Support AS=1 in guest.
So that OSes other than Linux can be expected to run in the guest.

2. Minimize the frequency of TLB flushes.

Signed-off-by: Liu Yu <yu....@freescale.com>
---
 arch/powerpc/include/asm/kvm_e500.h |    3 +
 arch/powerpc/include/asm/kvm_ppc.h  |    1 +
 arch/powerpc/kernel/asm-offsets.c   |    1 +
 arch/powerpc/kvm/booke.h            |    4 +
 arch/powerpc/kvm/booke_interrupts.S |   11 +++
 arch/powerpc/kvm/e500_emulate.c     |   10 ++-
 arch/powerpc/kvm/e500_tlb.c         |  146 +++++++++++++++++++++++++++++++++--
 arch/powerpc/kvm/e500_tlb.h         |    2 +
 8 files changed, 167 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_e500.h 
b/arch/powerpc/include/asm/kvm_e500.h
index ffa111b..e5ca811 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -43,6 +43,9 @@ struct kvmppc_vcpu_e500 {
        /* Pages which are referenced in the shadow TLB. */
        struct kvmppc_e500_shadow_ref *shadow_refs[E500_TLB_NUM];
 
+       /* MMU id mapping */
+       void *id_mapping;
+
        unsigned int guest_tlb_size[E500_TLB_NUM];
        unsigned int shadow_tlb_size[E500_TLB_NUM];
        unsigned int guest_tlb_nv[E500_TLB_NUM];
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 2c6ee34..40823c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -57,6 +57,7 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                            unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_as_switch(struct kvm_vcpu *vcpu, unsigned int as);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
 extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index 42fe4da..89d28df 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -379,6 +379,7 @@ int main(void)
        DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
        DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
        DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+       DEFINE(VCPU_SWAP_PID, offsetof(struct kvm_vcpu, arch.swap_pid));
 #endif
 #ifdef CONFIG_44x
        DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index d59bcca..96e6cc0 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -57,6 +57,10 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 
new_msr)
        if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
                kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
+       if ((new_msr & (MSR_IS | MSR_DS)) !=
+                       (vcpu->arch.msr & (MSR_IS | MSR_DS)))
+               kvmppc_mmu_as_switch(vcpu, new_msr & (MSR_IS | MSR_DS));
+
        vcpu->arch.msr = new_msr;
 
        if (vcpu->arch.msr & MSR_WE) {
diff --git a/arch/powerpc/kvm/booke_interrupts.S 
b/arch/powerpc/kvm/booke_interrupts.S
index d0c6f84..12383fe 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -192,6 +192,12 @@ _GLOBAL(kvmppc_resume_host)
        lwz     r3, VCPU_HOST_PID(r4)
        mtspr   SPRN_PID, r3
 
+#ifdef CONFIG_E500
+       /* We cheat and know Linux doesn't use PID1, which is always 0. */
+       lis     r3, 0
+       mtspr   SPRN_PID1, r3
+#endif
+
        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis     r3, 0xc000
@@ -350,6 +356,11 @@ lightweight_exit:
        lwz     r3, VCPU_SHADOW_PID(r4)
        mtspr   SPRN_PID, r3
 
+#ifdef CONFIG_E500
+       lwz     r3, VCPU_SWAP_PID(r4)
+       mtspr   SPRN_PID1, r3
+#endif
+
 #ifdef CONFIG_44x
        iccci   0, 0 /* XXX hack */
 #endif
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3f76041..8d85655 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -76,10 +76,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, int rs)
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
-       case SPRN_PID:
-               vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
-                       vcpu->arch.pid = vcpu->arch.gpr[rs];
+       case SPRN_PID: {
+               unsigned int as = !!(vcpu->arch.msr & (MSR_IS | MSR_DS));
+
+               vcpu_e500->pid[0] = vcpu->arch.pid = vcpu->arch.gpr[rs];
+               vcpu->arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, as,
+                                                   get_cur_pid(vcpu));
                break;
+       }
        case SPRN_PID1:
                vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
        case SPRN_PID2:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 847dfec..7b614a9 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -27,6 +27,101 @@
 
 static unsigned int tlb1_entry_num;
 
+struct id_mapping {
+       unsigned char id[2][256];
+};
+
+struct shadow_id {
+       void *ref;
+};
+
+static DEFINE_PER_CPU(struct shadow_id[256], host_sid);
+
+static inline int e500_id_create_mapping(unsigned char *entry)
+{
+       unsigned long sid;
+
+       preempt_disable();
+       sid = (unsigned long)++(__get_cpu_var(host_sid)[0].ref);
+       if (sid < 256) {
+               *entry = (unsigned char)sid;
+               __get_cpu_var(host_sid)[sid].ref = entry;
+               return sid;
+       }
+       preempt_enable();
+
+       return -1;
+}
+
+static inline void e500_id_destroy_all(void)
+{
+       preempt_disable();
+       memset(__get_cpu_var(host_sid), 0, sizeof(__get_cpu_var(host_sid)));
+       preempt_enable();
+}
+
+static inline int e500_id_find_mapping(unsigned char *entry)
+{
+       if (*entry && __get_cpu_var(host_sid)[*entry].ref == entry)
+               return *entry;
+       return -1;
+}
+
+static int kvmppc_e500_alloc_idm(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       vcpu_e500->id_mapping =
+           (struct id_mapping *)kzalloc(sizeof(struct id_mapping), GFP_KERNEL);
+       if (vcpu_e500->id_mapping != NULL)
+               return 0;
+       return -1;
+}
+
+static void kvmppc_e500_free_idm(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       kfree(vcpu_e500->id_mapping);
+       vcpu_e500->id_mapping = NULL;
+       return;
+}
+
+static inline void kvmppc_e500_reset_idm(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       memset(vcpu_e500->id_mapping, 0, sizeof(struct id_mapping));
+}
+
+static void inline kvmppc_e500_update_spid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       unsigned int as = !!(vcpu_e500->vcpu.arch.msr & (MSR_IS | MSR_DS));
+
+       vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, as,
+                       get_cur_pid(&vcpu_e500->vcpu));
+       vcpu_e500->vcpu.arch.swap_pid = kvmppc_e500_get_sid(vcpu_e500, as, 0);
+}
+
+/*
+ * Map guest (vcpu,as,id) to individual shadow id.
+ */
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                 int as, int gid)
+{
+       struct id_mapping *idm = vcpu_e500->id_mapping;
+       int sid;
+
+       sid = e500_id_find_mapping(&idm->id[as][gid]);
+
+       while (sid <= 0) {
+               /* No mapping yet */
+               sid = e500_id_create_mapping(&idm->id[as][gid]);
+               if(sid <= 0) {
+                       BUG_ON(sid == 0);
+                       _tlbil_all();
+                       e500_id_destroy_all();
+                       kvmppc_e500_update_spid(vcpu_e500);
+               }
+       }
+
+       return sid;
+}
+
 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -131,11 +226,14 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       /* Shadow PID may have expired */
+       kvmppc_e500_update_spid(vcpu_e500);
 }
 
 void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
 {
-       _tlbil_all();
 }
 
 /* Search the guest TLB for a matching entry. */
@@ -245,10 +343,14 @@ static inline void kvmppc_e500_setup_stlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, struct tlbe *stlbe)
 {
        hpa_t hpaddr = page_to_phys(ref->page);
+       unsigned int stid;
+
+       stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+                               get_tlb_tid(gtlbe));
 
        /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K)
-               | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
+               | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN)
                | e500_shadow_mas2_attrib(gtlbe->mas2,
                                vcpu_e500->vcpu.arch.msr & MSR_PR);
@@ -315,11 +417,29 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 
*vcpu_e500,
  * proper permission bits. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
        if (usermode) {
-               _tlbil_all();
+               /* clear PID for guest kernel mapping */
+               vcpu->arch.swap_pid = 0;
+       } else {
+               /* set PID for guest kernel mapping
+                * We assume:
+                * 1. AS  = 0 when entering supervisor mode
+                * 2. TID = 0 for supervisor code/data mappings */
+               vcpu->arch.swap_pid = kvmppc_e500_get_sid(vcpu_e500, 0, 0);
        }
 }
 
+void kvmppc_mmu_as_switch(struct kvm_vcpu *vcpu, u32 new_as)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       u32 pid = get_cur_pid(vcpu);
+
+       vcpu->arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, !!new_as, pid);
+       vcpu->arch.swap_pid = kvmppc_e500_get_sid(vcpu_e500, !!new_as, 0);
+}
+
 static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
@@ -345,7 +465,9 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 
*vcpu_e500, ulong value)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
 
-       _tlbil_all();
+       /* Reset vcpu shadow id mapping */
+       kvmppc_e500_reset_idm(vcpu_e500);
+       kvmppc_e500_update_spid(vcpu_e500);
 
        return EMULATE_DONE;
 }
@@ -376,7 +498,9 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, 
int rb)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }
 
-       _tlbil_all();
+       /* Reset vcpu shadow id mapping */
+       kvmppc_e500_reset_idm(vcpu_e500);
+       kvmppc_e500_update_spid(vcpu_e500);
 
        return EMULATE_DONE;
 }
@@ -555,9 +679,6 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
        for (stlbsel = 0; stlbsel < 2; stlbsel++)
                for (i = 0; i < vcpu_e500->guest_tlb_size[stlbsel]; i++)
                        kvmppc_e500_shadow_release(vcpu_e500, stlbsel, i);
-
-       /* discard all guest mapping */
-       _tlbil_all();
 }
 
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
@@ -629,6 +750,9 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 
*vcpu_e500)
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;
+
+       /* Set up shadow PID before starting the guest */
+       kvmppc_e500_update_spid(vcpu_e500);
 }
 
 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -657,8 +781,13 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 
*vcpu_e500)
        if (vcpu_e500->shadow_refs[1] == NULL)
                goto err_out_ref0;
 
+       if(kvmppc_e500_alloc_idm(vcpu_e500) < 0)
+               goto err_out_ref1;
+
        return 0;
 
+err_out_ref1:
+       kfree(vcpu_e500->shadow_refs[1]);
 err_out_ref0:
        kfree(vcpu_e500->shadow_refs[0]);
 err_out_guest1:
@@ -671,6 +800,7 @@ err_out:
 
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
+       kvmppc_e500_free_idm(vcpu_e500);
        kfree(vcpu_e500->shadow_refs[1]);
        kfree(vcpu_e500->shadow_refs[0]);
        kfree(vcpu_e500->guest_tlb[1]);
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 45b064b..5c6e56c 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -55,6 +55,8 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
 extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
 extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
 extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
+extern unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                       int as, int gid);
 
 /* TLB helper functions */
 static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
-- 
1.5.4

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to