There are quite a few pieces of code that I overlooked that still use
u64s instead of longs. This has two side effects:

  1) Slowness
  2) Breakage

This patch fixes both, enabling me to successfully run a Debian guest
on a G4 iBook in KVM.

Signed-off-by: Alexander Graf <[email protected]>

---

Please apply this on top of the series I just posted.
---
 arch/powerpc/include/asm/kvm_book3s.h |   17 +++++++++++++++--
 arch/powerpc/include/asm/kvm_host.h   |    6 +++---
 arch/powerpc/kvm/book3s.c             |    6 +++---
 arch/powerpc/kvm/book3s_32_mmu.c      |   20 ++++++++++----------
 arch/powerpc/kvm/book3s_32_mmu_host.c |   25 +++++++++----------------
 arch/powerpc/kvm/book3s_64_mmu.c      |    4 ++--
 arch/powerpc/kvm/book3s_64_mmu_host.c |   14 +++++++-------
 7 files changed, 49 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9517b8d..19d278f 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,6 +100,8 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST          1
 #define CONTEXT_GUEST_END      2
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
 #define VSID_REAL_DR   0x7ffffffffff00000ULL
 #define VSID_REAL_IR   0x7fffffffffe00000ULL
 #define VSID_SPLIT_MASK        0x7fffffffffe00000ULL
@@ -107,9 +109,20 @@ struct kvmppc_vcpu_book3s {
 #define VSID_BAT       0x7fffffffffb00000ULL
 #define VSID_PR                0x8000000000000000ULL
 
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+#else /* Only have 32 bits VSIDs */
+
+#define VSID_REAL_DR   0x7ff00000
+#define VSID_REAL_IR   0x7fe00000
+#define VSID_SPLIT_MASK        0x7fe00000
+#define VSID_REAL      0x7fc00000
+#define VSID_BAT       0x7fb00000
+#define VSID_PR                0x80000000
+
+#endif
+
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5a83995..69a9ba2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -124,9 +124,9 @@ struct kvm_arch {
 };
 
 struct kvmppc_pte {
-       u64 eaddr;
+       ulong eaddr;
        u64 vpage;
-       u64 raddr;
+       ulong raddr;
        bool may_read           : 1;
        bool may_write          : 1;
        bool may_execute        : 1;
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
        int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
        void (*reset_msr)(struct kvm_vcpu *vcpu);
        void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
-       int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+       int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, ulong *vsid);
        u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
        bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
 };
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 5805f99..a7de709 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -812,12 +812,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
-                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                }
                break;
@@ -843,7 +843,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        vcpu->arch.dear = dar;
                        to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-                       kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+                       kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
                        r = RESUME_GUEST;
                }
                break;
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 48efb37..430524f 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -60,8 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                                          struct kvmppc_pte *pte, bool data);
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
-                                            u64 *vsid);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+                                            ulong *vsid);
 
 static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
 {
@@ -71,14 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
 {
-       u64 vsid;
+       ulong vsid;
        struct kvmppc_pte pte;
 
        if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
                return pte.vpage;
 
        kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
-       return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
+       return (((u64)eaddr >> 12) & 0xffff) | ((u64)vsid << 16);
 }
 
 static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -148,11 +148,11 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                                    bat->bepi_mask);
                }
                if ((eaddr & bat->bepi_mask) == bat->bepi) {
-                       u64 vsid;
+                       ulong vsid;
                        kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
                                eaddr >> SID_SHIFT, &vsid);
-                       vsid <<= 16;
-                       pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+                       pte->vpage = (((u64)eaddr >> 12) & 0xffff) |
+                                    ((u64)vsid << 16);
 
                        pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
                        pte->may_read = bat->pp;
@@ -183,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
        struct kvmppc_sr *sre;
        hva_t ptegp;
        u32 pteg[16];
-       u64 ptem = 0;
+       u32 ptem = 0;
        int i;
        int found = 0;
 
@@ -327,8 +327,8 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
        kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
 }
 
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
-                                            u64 *vsid)
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+                                            ulong *vsid)
 {
        /* In case we only have one of MSR_IR or MSR_DR set, let's put
           that in the real-mode context (and hope RM doesn't access
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index ce1bfb1..50d11e1 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -77,11 +77,9 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
                kvm_release_pfn_clean(pte->pfn);
 }
 
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 _guest_ea, u64 _ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 {
        int i;
-       u32 guest_ea = _guest_ea;
-       u32 ea_mask = _ea_mask;
 
        dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
                    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
@@ -127,7 +125,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
        }
 }
 
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
        int i;
 
@@ -179,20 +177,15 @@ static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
-static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
+static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, ulong gvsid)
 {
-       return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-                    ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-                    ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-                    ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-                    ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-                    ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+       return (u16)(((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
 }
 
 
-static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, ulong gvsid)
 {
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;
@@ -251,7 +244,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 {
        pfn_t hpaddr;
        u64 va;
-       u64 vsid;
+       ulong vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
@@ -265,7 +258,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
        /* Get host physical address for gpa */
        hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpaddr)) {
-               printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n",
+               printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                                 orig_pte->eaddr);
                return -EINVAL;
        }
@@ -361,7 +354,7 @@ next_pteg:
        return 0;
 }
 
-static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, ulong gvsid)
 {
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -406,7 +399,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 {
        u32 esid = eaddr >> SID_SHIFT;
-       u64 gvsid;
+       ulong gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 12e4c97..65389ba 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -439,8 +439,8 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
        kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
 }
 
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
-                                            u64 *vsid)
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+                                            ulong *vsid)
 {
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index b230154..3d8fa50 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -62,7 +62,7 @@ static void invalidate_pte(struct hpte_cache *pte)
                kvm_release_pfn_clean(pte->pfn);
 }
 
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 {
        int i;
 
@@ -110,7 +110,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
        }
 }
 
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
        int i;
 
@@ -175,7 +175,7 @@ static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 }
 
 
-static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, ulong gvsid)
 {
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;
@@ -206,7 +206,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 {
        pfn_t hpaddr;
        ulong hash, hpteg, va;
-       u64 vsid;
+       ulong vsid;
        int ret;
        int rflags = 0x192;
        int vflags = 0;
@@ -216,7 +216,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
        /* Get host physical address for gpa */
        hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpaddr)) {
-               printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+               printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;
@@ -290,7 +290,7 @@ map_again:
        return 0;
 }
 
-static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, ulong gvsid)
 {
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -370,7 +370,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
-       u64 gvsid;
+       ulong gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
 
-- 
1.6.0.2

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to