Signed-off-by: Liu Yu <[EMAIL PROTECTED]>
---
arch/powerpc/include/asm/kvm_44x.h | 9 +++++
arch/powerpc/include/asm/kvm_host.h | 7 ----
arch/powerpc/include/asm/kvm_ppc.h | 5 +--
arch/powerpc/kvm/44x.c | 22 +++++++++++++-
arch/powerpc/kvm/44x_tlb.c | 29 +++++------------
arch/powerpc/kvm/44x_tlb.h | 19 +++++++++--
arch/powerpc/kvm/booke.c | 49 +++++++++----------------------
arch/powerpc/kvm/booke.h | 8 +++++
arch/powerpc/kvm/booke_interrupts.S | 56 ++++------------------------------
arch/powerpc/kvm/booke_interrupts.h | 54 +++++++++++++++++++++++++++++++++
10 files changed, 139 insertions(+), 119 deletions(-)
create mode 100644 arch/powerpc/kvm/booke_interrupts.h
diff --git a/arch/powerpc/include/asm/kvm_44x.h
b/arch/powerpc/include/asm/kvm_44x.h
index 72e5939..afb24cc 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -25,6 +25,15 @@
/* XXX Can't include mmu-44x.h because it redefines struct mm_context. */
#define PPC44x_TLB_SIZE 64
+#define BOOKE_INTERRUPT_SIZE 16
+
+struct kvmppc_44x_tlbe {
+ u32 tid; /* Only the low 8 bits are used. */
+ u32 word0;
+ u32 word1;
+ u32 word2;
+};
+
struct kvmppc_vcpu_44x {
/* Unmodified copy of the guest's TLB. */
struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
diff --git a/arch/powerpc/include/asm/kvm_host.h
b/arch/powerpc/include/asm/kvm_host.h
index a4a7d5e..2b1a31a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,13 +64,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct kvmppc_44x_tlbe {
- u32 tid; /* Only the low 8 bits are used. */
- u32 word0;
- u32 word1;
- u32 word2;
-};
-
struct kvm_arch {
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h
b/arch/powerpc/include/asm/kvm_ppc.h
index 408a0ca..40cc0bf 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -52,13 +52,12 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
- u64 asid, u32 flags);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
/* Core-specific hooks */
+extern void kvmppc_core_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, int index);
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
@@ -86,7 +85,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu,
int sprn, int rs);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-extern int kvmppc_booke_init(void);
+extern int kvmppc_booke_init(unsigned long *ivor, int size);
extern void kvmppc_booke_exit(void);
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 22054b1..6d72c9d 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -216,9 +216,29 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
static int kvmppc_44x_init(void)
{
+ unsigned long ivor[BOOKE_INTERRUPT_SIZE];
int r;
- r = kvmppc_booke_init();
+ /* Copy our interrupt handlers to match host IVORs. That way we don't
+ * have to swap the IVORs on every guest/host transition. */
+ ivor[0] = mfspr(SPRN_IVOR0);
+ ivor[1] = mfspr(SPRN_IVOR1);
+ ivor[2] = mfspr(SPRN_IVOR2);
+ ivor[3] = mfspr(SPRN_IVOR3);
+ ivor[4] = mfspr(SPRN_IVOR4);
+ ivor[5] = mfspr(SPRN_IVOR5);
+ ivor[6] = mfspr(SPRN_IVOR6);
+ ivor[7] = mfspr(SPRN_IVOR7);
+ ivor[8] = mfspr(SPRN_IVOR8);
+ ivor[9] = mfspr(SPRN_IVOR9);
+ ivor[10] = mfspr(SPRN_IVOR10);
+ ivor[11] = mfspr(SPRN_IVOR11);
+ ivor[12] = mfspr(SPRN_IVOR12);
+ ivor[13] = mfspr(SPRN_IVOR13);
+ ivor[14] = mfspr(SPRN_IVOR14);
+ ivor[15] = mfspr(SPRN_IVOR15);
+
+ r = kvmppc_booke_init(ivor, ARRAY_SIZE(ivor));
if (r)
return r;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 0f4296c..17daa56 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -118,30 +118,20 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t
eaddr, unsigned int pid,
return -1;
}
-struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr)
+int kvmppc_core_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
unsigned int as = !!(vcpu->arch.msr & MSR_IS);
- unsigned int index;
- index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
- if (index == -1)
- return NULL;
- return &vcpu_44x->guest_tlb[index];
+ return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
-struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr)
+int kvmppc_core_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
unsigned int as = !!(vcpu->arch.msr & MSR_DS);
- unsigned int index;
- index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
- if (index == -1)
- return NULL;
- return &vcpu_44x->guest_tlb[index];
+ return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
@@ -173,14 +163,16 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu,
unsigned int i)
/* Caller must ensure that the specified guest TLB entry is safe to insert into
* the shadow TLB. */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
- u32 flags)
+void kvmppc_core_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr,
+ gfn_t gfn, int index)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct page *new_page;
struct kvmppc_44x_tlbe *stlbe;
hpa_t hpaddr;
unsigned int victim;
+ u32 asid = (vcpu_44x->guest_tlb[index].word0 & PPC44x_TLB_TS) | vcpu_44x->guest_tlb[index].tid;
+ u32 flags = vcpu_44x->guest_tlb[index].word2 & 0xffff;
/* Future optimization: don't overwrite the TLB entry containing the
* current PC (or stack?). */
@@ -317,7 +309,5 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8
rs, u8 ws)
u64 eaddr;
u64 raddr;
- u64 asid;
- u32 flags;
struct kvmppc_44x_tlbe *tlbe;
unsigned int index;
@@ -358,13 +349,11 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra,
u8 rs, u8 ws)
if (tlbe_is_host_safe(vcpu, tlbe)) {
eaddr = get_tlb_eaddr(tlbe);
raddr = get_tlb_raddr(tlbe);
- asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
- flags = tlbe->word2 & 0xffff;
/* Create a 4KB mapping on the host. If the guest wanted a
* large page, only the first 4KB is mapped here and the rest
* are mapped on the fly. */
- kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+ kvmppc_core_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, index);
}
KVMTRACE_5D(GTLB_WRITE, vcpu, index,
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index b1029af..60ce44b 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -22,19 +22,21 @@
#include <linux/kvm_host.h>
#include <asm/mmu-44x.h>
+#include <asm/kvm_44x.h>
+extern int kvmppc_core_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_core_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned int pid, unsigned int as);
-extern struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr);
-extern struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr);
extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
u8 rc);
extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
+#define kvmppc_core_deliver_itlb_miss(vcpu) do { } while (0)
+#define kvmppc_core_deliver_dtlb_miss(vcpu) do { } while (0)
+
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
{
@@ -95,4 +97,13 @@ static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe,
gva_t eaddr)
return get_tlb_raddr(tlbe) | (eaddr & pgmask);
}
+static inline gpa_t index_xlate(struct kvm_vcpu *vcpu,
+ int index, gva_t eaddr)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[index];
+
+ return tlb_xlate(gtlbe, eaddr);
+}
+
#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ec59a67..d96f793 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -30,7 +30,6 @@
#include <asm/cacheflush.h>
#include "booke.h"
-#include "44x_tlb.h"
unsigned long kvmppc_booke_handlers;
@@ -282,15 +281,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct
kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_DTLB_MISS: {
- struct kvmppc_44x_tlbe *gtlbe;
+ int index;
unsigned long eaddr = vcpu->arch.fault_dear;
gfn_t gfn;
/* Check the guest TLB. */
- gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
- if (!gtlbe) {
+ index = kvmppc_core_dtlb_search(vcpu, eaddr);
+ if (index < 0) {
/* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_DTLB_MISS);
+ kvmppc_core_deliver_dtlb_miss(vcpu);
vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr;
vcpu->stat.dtlb_real_miss_exits++;
@@ -298,7 +298,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu
*vcpu,
break;
}
- vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
+ vcpu->arch.paddr_accessed = index_xlate(vcpu, index, eaddr);
gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
@@ -308,8 +308,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu
*vcpu,
* b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without
* invoking the guest. */
- kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
- gtlbe->word2);
+ kvmppc_core_mmu_map(vcpu, eaddr, gfn, index);
vcpu->stat.dtlb_virt_miss_exits++;
r = RESUME_GUEST;
} else {
@@ -323,24 +322,25 @@ int kvmppc_handle_exit(struct kvm_run *run, struct
kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_ITLB_MISS: {
- struct kvmppc_44x_tlbe *gtlbe;
+ int index;
unsigned long eaddr = vcpu->arch.pc;
gfn_t gfn;
r = RESUME_GUEST;
/* Check the guest TLB. */
- gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
- if (!gtlbe) {
+ index = kvmppc_core_itlb_search(vcpu, eaddr);
+ if (index < 0) {
/* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_ITLB_MISS);
+ kvmppc_core_deliver_itlb_miss(vcpu);
vcpu->stat.itlb_real_miss_exits++;
break;
}
vcpu->stat.itlb_virt_miss_exits++;
- gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+ gfn = index_xlate(vcpu, index, eaddr) >> PAGE_SHIFT;
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
/* The guest TLB had a mapping, but the shadow TLB
@@ -349,8 +349,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu
*vcpu,
* b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without
* invoking the guest. */
- kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
- gtlbe->word2);
+ kvmppc_core_mmu_map(vcpu, eaddr, gfn, index);
} else {
/* Guest mapped and leaped at non-RAM! */
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_MACHINE_CHECK);
@@ -494,9 +493,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return kvmppc_core_vcpu_translate(vcpu, tr);
}
-int kvmppc_booke_init(void)
+int kvmppc_booke_init(unsigned long ivor[], int size)
{
- unsigned long ivor[16];
unsigned long max_ivor = 0;
int i;
@@ -509,26 +507,7 @@ int kvmppc_booke_init(void)
/* XXX make sure our handlers are smaller than Linux's */
- /* Copy our interrupt handlers to match host IVORs. That way we don't
- * have to swap the IVORs on every guest/host transition. */
- ivor[0] = mfspr(SPRN_IVOR0);
- ivor[1] = mfspr(SPRN_IVOR1);
- ivor[2] = mfspr(SPRN_IVOR2);
- ivor[3] = mfspr(SPRN_IVOR3);
- ivor[4] = mfspr(SPRN_IVOR4);
- ivor[5] = mfspr(SPRN_IVOR5);
- ivor[6] = mfspr(SPRN_IVOR6);
- ivor[7] = mfspr(SPRN_IVOR7);
- ivor[8] = mfspr(SPRN_IVOR8);
- ivor[9] = mfspr(SPRN_IVOR9);
- ivor[10] = mfspr(SPRN_IVOR10);
- ivor[11] = mfspr(SPRN_IVOR11);
- ivor[12] = mfspr(SPRN_IVOR12);
- ivor[13] = mfspr(SPRN_IVOR13);
- ivor[14] = mfspr(SPRN_IVOR14);
- ivor[15] = mfspr(SPRN_IVOR15);
-
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < size; i++) {
if (ivor[i] > max_ivor)
max_ivor = ivor[i];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 48d905f..6e58bbe 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -54,4 +54,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32
new_msr)
kvm_vcpu_block(vcpu);
}
+#if defined(CONFIG_44x)
+# include "44x_tlb.h"
+#elif defined(CONFIG_FSL_BOOKE)
+# include "e500_tlb.h"
+#else
+# error "unsupported platform"
+#endif
+
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S
b/arch/powerpc/kvm/booke_interrupts.S
index 8d6929b..f33aa86 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -20,10 +20,11 @@
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
-#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include "booke_interrupts.h"
+
#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
@@ -174,8 +175,8 @@ _GLOBAL(kvmppc_resume_host)
/* Restore host stack pointer and PID before IVPR, since the host
* exception handlers use them. */
lwz r1, VCPU_HOST_STACK(r4)
- lwz r3, VCPU_HOST_PID(r4)
- mtspr SPRN_PID, r3
+
+ LOAD_PID(VCPU_HOST_PID)
/* Restore host IVPR before re-enabling interrupts. We cheat and know
* that Linux IVPR is always 0xc0000000. */
@@ -330,10 +331,8 @@ _GLOBAL(__kvmppc_vcpu_run)
lightweight_exit:
stw r2, HOST_R2(r1)
- mfspr r3, SPRN_PID
- stw r3, VCPU_HOST_PID(r4)
- lwz r3, VCPU_SHADOW_PID(r4)
- mtspr SPRN_PID, r3
+ SAVE_PID(VCPU_HOST_PID)
+ LOAD_PID(VCPU_SHADOW_PID)
/* Prevent all asynchronous TLB updates. */
mfmsr r5
@@ -342,48 +341,7 @@ lightweight_exit:
andc r6, r5, r6
mtmsr r6
- /* Load the guest mappings, leaving the host's "pinned" kernel mappings
- * in place. */
- mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
- li r5, PPC44x_TLB_SIZE
- lis r5, tlb_44x_hwater@ha
- lwz r5, tlb_44x_hwater@l(r5)
- mtctr r5
- addi r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB
- addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD
- li r3, 0
-1:
- lbzx r7, r3, r5
- cmpwi r7, 0
- beq 3f
-
- /* Load guest entry. */
- mulli r11, r3, TLBE_BYTES
- add r11, r11, r9
- lwz r7, 0(r11)
- mtspr SPRN_MMUCR, r7
- lwz r7, 4(r11)
- tlbwe r7, r3, PPC44x_TLB_PAGEID
- lwz r7, 8(r11)
- tlbwe r7, r3, PPC44x_TLB_XLAT
- lwz r7, 12(r11)
- tlbwe r7, r3, PPC44x_TLB_ATTRIB
-3:
- addi r3, r3, 1 /* Increment index. */
- bdnz 1b
-
- mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
-
- /* Clear bitmap of modified TLB entries */
- li r5, PPC44x_TLB_SIZE>>2
- mtctr r5
- addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD - 4
- li r6, 0
-1:
- stwu r6, 4(r5)
- bdnz 1b
-
- iccci 0, 0 /* XXX hack */
+ LOAD_GUEST_TLB
/* Load some guest volatiles. */
lwz r0, VCPU_GPR(r0)(r4)
diff --git a/arch/powerpc/kvm/booke_interrupts.h
b/arch/powerpc/kvm/booke_interrupts.h
new file mode 100644
index 0000000..bbd10fd
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.h
@@ -0,0 +1,54 @@
+#ifndef __KVMPPC_BOOKE_INTERRUPT_H__
+#define __KVMPPC_BOOKE_INTERRUPT_H__
+
+#ifdef CONFIG_44x
+#include <asm/mmu-44x.h>
+
+#define LOAD_PID(addr) \
+ lwz r3, addr(r4); \
+ mtspr SPRN_PID, r3;
+
+#define SAVE_PID(addr) \
+ mfspr r3, SPRN_PID; \
+ stw r3, addr(r4);
+
+ /* Load the guest mappings, leaving the host's "pinned" kernel mappings
+ * in place. */
+#define LOAD_GUEST_TLB \
+ mfspr r10, SPRN_MMUCR; /* Save host MMUCR. */ \
+ li r5, PPC44x_TLB_SIZE; \
+ lis r5, tlb_44x_hwater@ha; \
+ lwz r5, tlb_44x_hwater@l(r5); \
+ mtctr r5; \
+ addi r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB; \
+ addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD; \
+ li r3, 0; \
+1: lbzx r7, r3, r5; \
+ cmpwi r7, 0; \
+ beq 3f; \
+ /* Load guest entry. */ \
+ mulli r11, r3, TLBE_BYTES; \
+ add r11, r11, r9; \
+ lwz r7, 0(r11); \
+ mtspr SPRN_MMUCR, r7; \
+ lwz r7, 4(r11); \
+ tlbwe r7, r3, PPC44x_TLB_PAGEID; \
+ lwz r7, 8(r11); \
+ tlbwe r7, r3, PPC44x_TLB_XLAT; \
+ lwz r7, 12(r11); \
+ tlbwe r7, r3, PPC44x_TLB_ATTRIB; \
+3: addi r3, r3, 1; /* Increment index. */ \
+ bdnz 1b; \
+ mtspr SPRN_MMUCR, r10; /* Restore host MMUCR. */ \
+ /* Clear bitmap of modified TLB entries */ \
+ li r5, PPC44x_TLB_SIZE>>2; \
+ mtctr r5; \
+ addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD - 4; \
+ li r6, 0; \
+1: stwu r6, 4(r5); \
+ bdnz 1b; \
+ iccci 0, 0 /* XXX hack */
+
+#endif
+
+#endif /* __KVMPPC_BOOKE_INTERRUPT_H__ */
--
1.5.4
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html