[PATCH v2 1/2] KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core

2014-09-01 Thread Mihai Caraman
ePAPR represents hardware threads as cpu node properties in device tree.
So with existing QEMU, hardware threads are simply exposed as vcpus with
one hardware thread.

The e6500 core shares TLBs between hardware threads. Without tlb write
conditional instruction, the Linux kernel uses per core mechanisms to
protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use the
TLB protection mechanism. An alternative solution is to use the hypervisor
to allocate different lpids to guest's vcpus that run simultaneously on real
sibling threads. On systems with two threads per core this patch halves
the size of the lpid pool that the allocator sees and use two lpids per VM.
Use even numbers to speedup vcpu lpid computation with consecutive lpids
per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - halve the size of the lpid pool that the allocator sees to get rid of
   ifdefs in the headers and to have lpids correlated.

 arch/powerpc/include/asm/kvm_booke.h |  5 +++-
 arch/powerpc/kvm/e500.h  | 20 
 arch/powerpc/kvm/e500_mmu_host.c | 18 +++---
 arch/powerpc/kvm/e500mc.c| 46 ++--
 4 files changed, 65 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index f7aa5cc..630134d 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,7 +23,10 @@
 #include linux/types.h
 #include linux/kvm_host.h
 
-/* LPIDs we support with this build -- runtime limit may be lower */
+/*
+ * Number of available lpids. Only the low-order 6 bits of LPID register are
+ * implemented on e500mc+ cores.
+ */
 #define KVMPPC_NR_LPIDS64
 
 #define KVMPPC_INST_EHPRIV 0x7c00021c
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178..7b74453 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
 #include linux/kvm_host.h
 #include asm/mmu-book3e.h
 #include asm/tlb.h
+#include asm/cputhreads.h
 
 enum vcpu_ftr {
VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 
*vcpu_e500);
 #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)   get_tlb_tid(gtlbe)
 #define get_tlbmiss_tid(vcpu)   get_cur_pid(vcpu)
 #define get_tlb_sts(gtlbe)  (gtlbe-mas1  MAS1_TS)
+
+/*
+ * This function should be called with preemption disabled
+ * and the returned value is valid only in that context
+ */
+static inline int get_thread_specific_lpid(int vm_lpid)
+{
+   int vcpu_lpid = vm_lpid;
+
+   if (threads_per_core == 2)
+   vcpu_lpid |= smp_processor_id()  1;
+
+   return vcpu_lpid;
+}
+
+static inline int get_lpid(struct kvm_vcpu *vcpu)
+{
+   return get_thread_specific_lpid(vcpu-kvm-arch.lpid);
+}
 #else
 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
  struct kvm_book3e_206_tlb_entry *gtlbe);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb..c8795a6 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int 
usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-uint32_t mas0)
+uint32_t mas0,
+uint32_t lpid)
 {
unsigned long flags;
 
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct 
kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe-mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe-mas7_3  32));
 #ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_MAS8, stlbe-mas8);
+   mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
 #endif
asm volatile(isync; tlbwe : : : memory);
 
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe-mas2);
-   __write_host_tlbe(stlbe, mas0);
+   __write_host_tlbe(stlbe, mas0, vcpu_e500-vcpu.kvm-arch.lpid);
} else {
__write_host_tlbe(stlbe,
  MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500-vcpu.kvm-arch.lpid);
}
 }
 
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
   MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
magic.mas8 = 0;
 
-   __write_host_tlbe(magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+   __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);

[PATCH v2 2/2] KVM: PPC: Book3E: Enable e6500 core

2014-09-01 Thread Mihai Caraman
Now that AltiVec and hardware thread support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - new patch

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index bf8f99f..2fdc872 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -180,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the privilege to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: Remove shared defines for SPE and AltiVec interrupts

2014-09-01 Thread Mihai Caraman
We currently decide at compile-time which of the SPE or AltiVec units to
support exclusively. Guard kernel defines with CONFIG_SPE_POSSIBLE and
CONFIG_PPC_E500MC and remove shared defines.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/kvm_asm.h | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_asm.h 
b/arch/powerpc/include/asm/kvm_asm.h
index b8901c4..68644c7 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -53,17 +53,17 @@
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
-/*
- * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
- */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#ifdef CONFIG_SPE_POSSIBLE
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
+#endif
+
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
 #define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: Remove the tasklet used by the hrtimer

2014-09-01 Thread Mihai Caraman
Powerpc timer implementation is a copycat version of s390. Now that they removed
the tasklet with commit ea74c0ea1b24a6978a6ebc80ba4dbc7b7848b32d follow this
optimization.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
Signed-off-by: Bogdan Purcareata bogdan.purcare...@freescale.com
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/include/asm/kvm_ppc.h  | 2 +-
 arch/powerpc/kvm/book3s.c   | 4 +---
 arch/powerpc/kvm/booke.c| 4 +---
 arch/powerpc/kvm/powerpc.c  | 8 +---
 5 files changed, 4 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index cc11aed..3502649 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -611,7 +611,6 @@ struct kvm_vcpu_arch {
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 
struct hrtimer dec_timer;
-   struct tasklet_struct tasklet;
u64 dec_jiffies;
u64 dec_expires;
unsigned long pending_exceptions;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index fb86a22..1117360 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -89,7 +89,7 @@ extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
-extern void kvmppc_decrementer_func(unsigned long data);
+extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
 extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 1b5adda..f23b6a5 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -718,10 +718,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu 
*vcpu,
return -EINVAL;
 }
 
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 {
-   struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
 }
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 831c1b4..a4487f4 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1782,10 +1782,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 
tsr_bits)
update_timer_ints(vcpu);
 }
 
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 {
-   struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
if (vcpu-arch.tcr  TCR_ARE) {
vcpu-arch.dec = vcpu-arch.decar;
kvmppc_emulate_dec(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 19d4755..02a6e2d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -658,7 +658,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
/* Make sure we're not using the vcpu anymore */
hrtimer_cancel(vcpu-arch.dec_timer);
-   tasklet_kill(vcpu-arch.tasklet);
 
kvmppc_remove_vcpu_debugfs(vcpu);
 
@@ -684,16 +683,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 {
struct kvm_vcpu *vcpu;
 
vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
-   tasklet_schedule(vcpu-arch.tasklet);
+   kvmppc_decrementer_func(vcpu);
 
return HRTIMER_NORESTART;
 }
@@ -703,7 +698,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
int ret;
 
hrtimer_init(vcpu-arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-   tasklet_init(vcpu-arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu-arch.dec_timer.function = kvmppc_decrementer_wakeup;
vcpu-arch.dec_expires = ~(u64)0;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2 1/2] KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core

2014-09-01 Thread Mihai Caraman
ePAPR represents hardware threads as cpu node properties in device tree.
So with existing QEMU, hardware threads are simply exposed as vcpus with
one hardware thread.

The e6500 core shares TLBs between hardware threads. Without tlb write
conditional instruction, the Linux kernel uses per core mechanisms to
protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use the
TLB protection mechanism. An alternative solution is to use the hypervisor
to allocate different lpids to guest's vcpus that run simultaneously on real
sibling threads. On systems with two threads per core this patch halves
the size of the lpid pool that the allocator sees and use two lpids per VM.
Use even numbers to speedup vcpu lpid computation with consecutive lpids
per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - halve the size of the lpid pool that the allocator sees to get rid of
   ifdefs in the headers and to have lpids correlated.

 arch/powerpc/include/asm/kvm_booke.h |  5 +++-
 arch/powerpc/kvm/e500.h  | 20 
 arch/powerpc/kvm/e500_mmu_host.c | 18 +++---
 arch/powerpc/kvm/e500mc.c| 46 ++--
 4 files changed, 65 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index f7aa5cc..630134d 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,7 +23,10 @@
 #include linux/types.h
 #include linux/kvm_host.h
 
-/* LPIDs we support with this build -- runtime limit may be lower */
+/*
+ * Number of available lpids. Only the low-order 6 bits of LPID register are
+ * implemented on e500mc+ cores.
+ */
 #define KVMPPC_NR_LPIDS64
 
 #define KVMPPC_INST_EHPRIV 0x7c00021c
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178..7b74453 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
 #include linux/kvm_host.h
 #include asm/mmu-book3e.h
 #include asm/tlb.h
+#include asm/cputhreads.h
 
 enum vcpu_ftr {
VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 
*vcpu_e500);
 #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)   get_tlb_tid(gtlbe)
 #define get_tlbmiss_tid(vcpu)   get_cur_pid(vcpu)
 #define get_tlb_sts(gtlbe)  (gtlbe-mas1  MAS1_TS)
+
+/*
+ * This function should be called with preemption disabled
+ * and the returned value is valid only in that context
+ */
+static inline int get_thread_specific_lpid(int vm_lpid)
+{
+   int vcpu_lpid = vm_lpid;
+
+   if (threads_per_core == 2)
+   vcpu_lpid |= smp_processor_id()  1;
+
+   return vcpu_lpid;
+}
+
+static inline int get_lpid(struct kvm_vcpu *vcpu)
+{
+   return get_thread_specific_lpid(vcpu-kvm-arch.lpid);
+}
 #else
 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
  struct kvm_book3e_206_tlb_entry *gtlbe);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb..c8795a6 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int 
usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-uint32_t mas0)
+uint32_t mas0,
+uint32_t lpid)
 {
unsigned long flags;
 
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct 
kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe-mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe-mas7_3  32));
 #ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_MAS8, stlbe-mas8);
+   mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
 #endif
asm volatile(isync; tlbwe : : : memory);
 
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe-mas2);
-   __write_host_tlbe(stlbe, mas0);
+   __write_host_tlbe(stlbe, mas0, vcpu_e500-vcpu.kvm-arch.lpid);
} else {
__write_host_tlbe(stlbe,
  MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500-vcpu.kvm-arch.lpid);
}
 }
 
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
   MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
magic.mas8 = 0;
 
-   __write_host_tlbe(magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+   __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);

[PATCH v2 2/2] KVM: PPC: Book3E: Enable e6500 core

2014-09-01 Thread Mihai Caraman
Now that AltiVec and hardware thread support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - new patch

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index bf8f99f..2fdc872 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -180,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the privilege to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: Remove shared defines for SPE and AltiVec interrupts

2014-09-01 Thread Mihai Caraman
We currently decide at compile-time which of the SPE or AltiVec units to
support exclusively. Guard kernel defines with CONFIG_SPE_POSSIBLE and
CONFIG_PPC_E500MC and remove shared defines.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/kvm_asm.h | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_asm.h 
b/arch/powerpc/include/asm/kvm_asm.h
index b8901c4..68644c7 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -53,17 +53,17 @@
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
-/*
- * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
- */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#ifdef CONFIG_SPE_POSSIBLE
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
+#endif
+
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
 #define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: Remove the tasklet used by the hrtimer

2014-09-01 Thread Mihai Caraman
Powerpc timer implementation is a copycat version of s390. Now that they removed
the tasklet with commit ea74c0ea1b24a6978a6ebc80ba4dbc7b7848b32d follow this
optimization.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
Signed-off-by: Bogdan Purcareata bogdan.purcare...@freescale.com
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/include/asm/kvm_ppc.h  | 2 +-
 arch/powerpc/kvm/book3s.c   | 4 +---
 arch/powerpc/kvm/booke.c| 4 +---
 arch/powerpc/kvm/powerpc.c  | 8 +---
 5 files changed, 4 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index cc11aed..3502649 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -611,7 +611,6 @@ struct kvm_vcpu_arch {
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 
struct hrtimer dec_timer;
-   struct tasklet_struct tasklet;
u64 dec_jiffies;
u64 dec_expires;
unsigned long pending_exceptions;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index fb86a22..1117360 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -89,7 +89,7 @@ extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
-extern void kvmppc_decrementer_func(unsigned long data);
+extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
 extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 1b5adda..f23b6a5 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -718,10 +718,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu 
*vcpu,
return -EINVAL;
 }
 
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 {
-   struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
 }
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 831c1b4..a4487f4 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1782,10 +1782,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 
tsr_bits)
update_timer_ints(vcpu);
 }
 
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 {
-   struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
if (vcpu-arch.tcr  TCR_ARE) {
vcpu-arch.dec = vcpu-arch.decar;
kvmppc_emulate_dec(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 19d4755..02a6e2d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -658,7 +658,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
/* Make sure we're not using the vcpu anymore */
hrtimer_cancel(vcpu-arch.dec_timer);
-   tasklet_kill(vcpu-arch.tasklet);
 
kvmppc_remove_vcpu_debugfs(vcpu);
 
@@ -684,16 +683,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 {
struct kvm_vcpu *vcpu;
 
vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
-   tasklet_schedule(vcpu-arch.tasklet);
+   kvmppc_decrementer_func(vcpu);
 
return HRTIMER_NORESTART;
 }
@@ -703,7 +698,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
int ret;
 
hrtimer_init(vcpu-arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-   tasklet_init(vcpu-arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu-arch.dec_timer.function = kvmppc_decrementer_wakeup;
vcpu-arch.dec_expires = ~(u64)0;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/2] KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core

2014-08-29 Thread Mihai Caraman
ePAPR represents hardware threads as cpu node properties in device tree.
So with existing QEMU, hardware threads are simply exposed as vcpus with
one hardware thread.

The e6500 core shares TLBs between hardware threads. Without tlb write
conditional instruction, the Linux kernel uses per core mechanisms to
protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use a
TLB protection mechanism. An alternative solution is to use the hypervisor
to allocate different lpids to guest's vcpus running simultaneously on real
sibling threads. On systems with two threads per core this patch halves
the size of the lpid pool that the allocator sees and use two lpids per VM.
Use even numbers to speedup vcpu lpid computation with consecutive lpids
per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/kvm_booke.h |  5 +++-
 arch/powerpc/kvm/e500.h  | 20 
 arch/powerpc/kvm/e500_mmu_host.c | 16 ++---
 arch/powerpc/kvm/e500mc.c| 46 ++--
 4 files changed, 64 insertions(+), 23 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index f7aa5cc..630134d 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,7 +23,10 @@
 #include linux/types.h
 #include linux/kvm_host.h
 
-/* LPIDs we support with this build -- runtime limit may be lower */
+/*
+ * Number of available lpids. Only the low-order 6 bits of LPID register are
+ * implemented on e500mc+ cores.
+ */
 #define KVMPPC_NR_LPIDS64
 
 #define KVMPPC_INST_EHPRIV 0x7c00021c
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178..7b74453 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
 #include linux/kvm_host.h
 #include asm/mmu-book3e.h
 #include asm/tlb.h
+#include asm/cputhreads.h
 
 enum vcpu_ftr {
VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 
*vcpu_e500);
 #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)   get_tlb_tid(gtlbe)
 #define get_tlbmiss_tid(vcpu)   get_cur_pid(vcpu)
 #define get_tlb_sts(gtlbe)  (gtlbe-mas1  MAS1_TS)
+
+/*
+ * This function should be called with preemption disabled
+ * and the returned value is valid only in that context
+ */
+static inline int get_thread_specific_lpid(int vm_lpid)
+{
+   int vcpu_lpid = vm_lpid;
+
+   if (threads_per_core == 2)
+   vcpu_lpid |= smp_processor_id()  1;
+
+   return vcpu_lpid;
+}
+
+static inline int get_lpid(struct kvm_vcpu *vcpu)
+{
+   return get_thread_specific_lpid(vcpu-kvm-arch.lpid);
+}
 #else
 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
  struct kvm_book3e_206_tlb_entry *gtlbe);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb..5759608 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int 
usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-uint32_t mas0)
+uint32_t mas0,
+uint32_t lpid)
 {
unsigned long flags;
 
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct 
kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe-mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe-mas7_3  32));
 #ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_MAS8, stlbe-mas8);
+   mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
 #endif
asm volatile(isync; tlbwe : : : memory);
 
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe-mas2);
-   __write_host_tlbe(stlbe, mas0);
+   __write_host_tlbe(stlbe, mas0, vcpu_e500-vcpu.kvm-arch.lpid);
} else {
__write_host_tlbe(stlbe,
  MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500-vcpu.kvm-arch.lpid);
}
 }
 
@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
stlbe-mas2 = (gvaddr  MAS2_EPN) | (ref-flags  E500_TLB_MAS2_ATTR);
stlbe-mas7_3 = ((u64)pfn  PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe-mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
-   stlbe-mas8 = MAS8_TGS | vcpu-kvm-arch.lpid;
-#endif
 }
 
 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500

[PATCH 2/2] KVM: PPC: Book3E: Enable e6500 core

2014-08-29 Thread Mihai Caraman
Now that AltiVec and hardware threading support are in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index bf8f99f..2fdc872 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -180,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the privilege to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 2/2] KVM: PPC: Book3E: Enable e6500 core

2014-08-29 Thread Mihai Caraman
Now that AltiVec and hardware threading support are in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index bf8f99f..2fdc872 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -180,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the privilege to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 4/6] KVM: PPC: Move ONE_REG AltiVec support to powerpc

2014-08-20 Thread Mihai Caraman
Move ONE_REG AltiVec support to powerpc generic layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - split ONE_REG powerpc generic and ONE_REG AltiVec

v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |  5 +
 arch/powerpc/kvm/book3s.c   | 42 -
 arch/powerpc/kvm/powerpc.c  | 42 +
 3 files changed, 47 insertions(+), 42 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 3ca357a..ab4d473 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accessed via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 26868e2..1b5adda 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -558,25 +558,6 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_FPSCR:
*val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val-vval = vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0];
-   break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   *val = get_reg_val(id, vcpu-arch.vr.vscr.u[3]);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   *val = get_reg_val(id, vcpu-arch.vrsave);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -653,29 +634,6 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_FPSCR:
vcpu-arch.fp.fpscr = set_reg_val(id, *val);
break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0] = val-vval;
-   break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vr.vscr.u[3] = set_reg_val(id, *val);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vrsave = set_reg_val(id, *val);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1326116..19d4755 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -941,6 +941,25 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
if (r == -EINVAL) {
r = 0;
switch (reg-id) {
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3

[PATCH v4 6/6] KVM: PPC: Booke: Add ONE_REG support for IVPR and IVORs

2014-08-20 Thread Mihai Caraman
Add ONE_REG support for IVPR and IVORs registers. Implement IVPR, IVORs 0-15
and 35 in booke common layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - add ONE_REG IVPR
 - use IVPR, IVOR2 and IVOR8 setters
 - add api documentation for ONE_REG IVPR and IVORs

v3:
 - new patch

 Documentation/virtual/kvm/api.txt   |   7 ++
 arch/powerpc/include/uapi/asm/kvm.h |  25 +++
 arch/powerpc/kvm/booke.c| 145 
 arch/powerpc/kvm/e500.c |  42 ++-
 arch/powerpc/kvm/e500mc.c   |  16 
 5 files changed, 233 insertions(+), 2 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt 
b/Documentation/virtual/kvm/api.txt
index beae3fd..cd7b171 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1917,6 +1917,13 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR   | 32
   PPC   | KVM_REG_PPC_TM_DSCR   | 64
   PPC   | KVM_REG_PPC_TM_TAR| 64
+  PPC   | KVM_REG_PPC_IVPR  | 64
+  PPC   | KVM_REG_PPC_IVOR0 | 32
+  ...
+  PPC   | KVM_REG_PPC_IVOR15| 32
+  PPC   | KVM_REG_PPC_IVOR32| 32
+  ...
+  PPC   | KVM_REG_PPC_IVOR37| 32
 |   |
   MIPS  | KVM_REG_MIPS_R0   | 64
   ...
diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index ab4d473..c97f119 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -564,6 +564,31 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_SPRG9  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 #define KVM_REG_PPC_DBSR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
 
+/* Booke IVPR & IVOR registers */
+#define KVM_REG_PPC_IVPR   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_IVOR0  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbd)
+#define KVM_REG_PPC_IVOR1  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbe)
+#define KVM_REG_PPC_IVOR2  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_IVOR3  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc0)
+#define KVM_REG_PPC_IVOR4  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc1)
+#define KVM_REG_PPC_IVOR5  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc2)
+#define KVM_REG_PPC_IVOR6  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc3)
+#define KVM_REG_PPC_IVOR7  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc4)
+#define KVM_REG_PPC_IVOR8  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc5)
+#define KVM_REG_PPC_IVOR9  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc6)
+#define KVM_REG_PPC_IVOR10 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc7)
+#define KVM_REG_PPC_IVOR11 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc8)
+#define KVM_REG_PPC_IVOR12 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc9)
+#define KVM_REG_PPC_IVOR13 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xca)
+#define KVM_REG_PPC_IVOR14 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcb)
+#define KVM_REG_PPC_IVOR15 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcc)
+#define KVM_REG_PPC_IVOR32 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcd)
+#define KVM_REG_PPC_IVOR33 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xce)
+#define KVM_REG_PPC_IVOR34 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcf)
+#define KVM_REG_PPC_IVOR35 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd0)
+#define KVM_REG_PPC_IVOR36 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd1)
+#define KVM_REG_PPC_IVOR37 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd2)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d4df648..1cb2a2a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1570,6 +1570,75 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
 
switch (id) {
+   case KVM_REG_PPC_IVPR:
+   *val = get_reg_val(id, vcpu-arch.ivpr);
+   break;
+   case KVM_REG_PPC_IVOR0:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR1:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+   break;
+   case KVM_REG_PPC_IVOR2:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR3:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR4:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+   break;
+   case KVM_REG_PPC_IVOR5:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+   break;
+   case

[PATCH v4 0/6] KVM: PPC: Book3e: AltiVec support

2014-08-20 Thread Mihai Caraman
Add KVM Book3e AltiVec support.

Changes:

v4:
 - use CONFIG_SPE_POSSIBLE and a new ifdef for CONFIG_ALTIVEC
 - remove SPE handlers from bookehv
 - split ONE_REG powerpc generic and ONE_REG AltiVec
 - add setters for IVPR, IVOR2 and IVOR8
 - add api documentation for ONE_REG IVPR and IVORs
 - don't enable e6500 core since hardware threads are not yet supported

v3:
 - use distinct SPE/AltiVec exception handlers
 - make ONE_REG AltiVec support powerpc generic
 - add ONE_REG IVORs support

 v2:
 - integrate Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue
   in January and take into account feedback

Mihai Caraman (6):
  KVM: PPC: Book3E: Increase FPU laziness
  KVM: PPC: Book3e: Add AltiVec support
  KVM: PPC: Make ONE_REG powerpc generic
  KVM: PPC: Move ONE_REG AltiVec support to powerpc
  KVM: PPC: Booke: Add setter functions for IVPR, IVOR2 and IVOR8
emulation
  KVM: PPC: Booke: Add ONE_REG support for IVPR and IVORs

 Documentation/virtual/kvm/api.txt |   7 +
 arch/powerpc/include/uapi/asm/kvm.h   |  30 +++
 arch/powerpc/kvm/book3s.c | 151 --
 arch/powerpc/kvm/booke.c  | 371 --
 arch/powerpc/kvm/booke.h  |  43 +---
 arch/powerpc/kvm/booke_emulate.c  |  15 +-
 arch/powerpc/kvm/bookehv_interrupts.S |   9 +-
 arch/powerpc/kvm/e500.c   |  42 +++-
 arch/powerpc/kvm/e500_emulate.c   |  20 ++
 arch/powerpc/kvm/e500mc.c |  18 +-
 arch/powerpc/kvm/powerpc.c|  97 +
 11 files changed, 576 insertions(+), 227 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 3/6] KVM: PPC: Make ONE_REG powerpc generic

2014-08-20 Thread Mihai Caraman
Make ONE_REG generic for server and embedded architectures by moving
kvm_vcpu_ioctl_get_one_reg() and kvm_vcpu_ioctl_set_one_reg() functions
to powerpc layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - split ONE_REG powerpc generic and ONE_REG AltiVec

v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/kvm/book3s.c  | 121 +++--
 arch/powerpc/kvm/booke.c   |  91 +-
 arch/powerpc/kvm/powerpc.c |  55 +
 3 files changed, 138 insertions(+), 129 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index dd03f6b..26868e2 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,33 +535,28 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, 
struct kvm_fpu *fpu)
return -ENOTSUPP;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+   union kvmppc_one_reg *val)
 {
-   int r;
-   union kvmppc_one_reg val;
-   int size;
+   int r = 0;
long int i;
 
-   size = one_reg_size(reg-id);
-   if (size  sizeof(val))
-   return -EINVAL;
-
-   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
+   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
-   switch (reg-id) {
+   switch (id) {
case KVM_REG_PPC_DAR:
-   val = get_reg_val(reg-id, kvmppc_get_dar(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
-   val = get_reg_val(reg-id, kvmppc_get_dsisr(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-   i = reg-id - KVM_REG_PPC_FPR0;
-   val = get_reg_val(reg-id, VCPU_FPR(vcpu, i));
+   i = id - KVM_REG_PPC_FPR0;
+   *val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
-   val = get_reg_val(reg-id, vcpu-arch.fp.fpscr);
+   *val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
 #ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -569,110 +564,94 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
-   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   val-vval = vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
-   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
+   *val = get_reg_val(id, vcpu-arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
-   val = get_reg_val(reg-id, vcpu-arch.vrsave);
+   *val = get_reg_val(id, vcpu-arch.vrsave);
break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
-   long int i = reg-id - KVM_REG_PPC_VSR0;
-   val.vsxval[0] = vcpu-arch.fp.fpr[i][0];
-   val.vsxval[1] = vcpu-arch.fp.fpr[i][1];
+   i = id - KVM_REG_PPC_VSR0;
+   val-vsxval[0] = vcpu-arch.fp.fpr[i][0];
+   val-vsxval[1] = vcpu-arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
 #endif /* CONFIG_VSX */
-   case KVM_REG_PPC_DEBUG_INST: {
-   u32 opcode = INS_TW;
-   r = copy_to_user((u32 __user *)(long)reg-addr,
-opcode, sizeof(u32));
+   case KVM_REG_PPC_DEBUG_INST:
+   *val = get_reg_val(id, INS_TW);
break;
-   }
 #ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
if (!vcpu-arch.icp) {
r = -ENXIO;
break

[PATCH v4 2/6] KVM: PPC: Book3e: Add AltiVec support

2014-08-20 Thread Mihai Caraman
Add AltiVec support in KVM for Book3e. FPU support gracefully reuse host
infrastructure so follow the same approach for AltiVec.

Book3e specification defines shared interrupt numbers for SPE and AltiVec
units. Still SPE is present in e200/e500v2 cores while AltiVec is present in
e6500 core. So we can currently decide at compile-time which of the SPE or
AltiVec units to support exclusively by using CONFIG_SPE_POSSIBLE and
CONFIG_PPC_E500MC defines. As Alexander Graf suggested, keep SPE and AltiVec
exception handlers distinct to improve code readability.

Guests have the privilege to enable AltiVec, so we always need to support
AltiVec in KVM and implicitly in host to reflect interrupts and to save/restore
the unit context. KVM will be loaded on cores with AltiVec unit only if
CONFIG_ALTIVEC is defined. Use this define to guard KVM AltiVec logic.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - use CONFIG_SPE_POSSIBLE and a new ifdef for CONFIG_ALTIVEC
 - remove SPE handlers from bookehv
 - update commit message

v3:
 - use distinct SPE/AltiVec exception handlers

v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c  | 74 ++-
 arch/powerpc/kvm/booke.h  |  6 +++
 arch/powerpc/kvm/bookehv_interrupts.S |  9 +
 arch/powerpc/kvm/e500_emulate.c   | 20 ++
 4 files changed, 101 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 91e7217..8ace612 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -168,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -375,9 +409,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_SPE_POSSIBLE
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#endif
+#ifdef CONFIG_ALTIVEC
+   case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -697,6 +737,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.dbg_reg;
switch_booke_debug_regs(debug);
@@ -719,6 +770,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1025,7 +1080,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
case BOOKE_INTERRUPT_SPE_UNAVAIL:
/*
 * Guest wants SPE, but host kernel doesn't support it.  Send
@@ -1046,6 +1101,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
+#endif /* CONFIG_SPE_POSSIBLE

[PATCH v4 5/6] KVM: PPC: Booke: Add setter functions for IVPR, IVOR2 and IVOR8 emulation

2014-08-20 Thread Mihai Caraman
Add setter functions for IVPR, IVOR2 and IVOR8 emulation in preparation
for ONE_REG support.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - new patch
 - add api documentation for ONE_REG IVPR and IVORs

 arch/powerpc/kvm/booke.c | 24 
 arch/powerpc/kvm/booke.h |  3 +++
 arch/powerpc/kvm/booke_emulate.c | 15 +++
 3 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 831c1b4..d4df648 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1782,6 +1782,30 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 
tsr_bits)
update_timer_ints(vcpu);
 }
 
+void kvmppc_set_ivpr(struct kvm_vcpu *vcpu, ulong new_ivpr)
+{
+   vcpu-arch.ivpr = new_ivpr;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVPR, new_ivpr);
+#endif
+}
+
+void kvmppc_set_ivor2(struct kvm_vcpu *vcpu, u32 new_ivor)
+{
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = new_ivor;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVOR2, new_ivor);
+#endif
+}
+
+void kvmppc_set_ivor8(struct kvm_vcpu *vcpu, u32 new_ivor)
+{
+   vcpu-arch.ivor[BOOKE_IRQPRIO_SYSCALL] = new_ivor;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVOR8, new_ivor);
+#endif
+}
+
 void kvmppc_decrementer_func(unsigned long data)
 {
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 22ba08e..0242530 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -80,6 +80,9 @@ void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+void kvmppc_set_ivpr(struct kvm_vcpu *vcpu, ulong new_ivpr);
+void kvmppc_set_ivor2(struct kvm_vcpu *vcpu, u32 new_ivor);
+void kvmppc_set_ivor8(struct kvm_vcpu *vcpu, u32 new_ivor);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 unsigned int inst, int *advance);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 92bc668..94c64e3 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -191,10 +191,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
break;
 
case SPRN_IVPR:
-   vcpu-arch.ivpr = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVPR, spr_val);
-#endif
+   kvmppc_set_ivpr(vcpu, spr_val);
break;
case SPRN_IVOR0:
vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -203,10 +200,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
break;
case SPRN_IVOR2:
-   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVOR2, spr_val);
-#endif
+   kvmppc_set_ivor2(vcpu, spr_val);
break;
case SPRN_IVOR3:
vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -224,10 +218,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
vcpu-arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR8:
-   vcpu-arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVOR8, spr_val);
-#endif
+   kvmppc_set_ivor8(vcpu, spr_val);
break;
case SPRN_IVOR9:
vcpu-arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/6] KVM: PPC: Book3E: Increase FPU laziness

2014-08-20 Thread Mihai Caraman
Increase FPU laziness by loading the guest state into the unit before entering
the guest instead of doing it on each vcpu schedule. Without this improvement
an interrupt may claim floating point corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - update commit message

v3:
 - no changes

v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 074b7fc..91e7217 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we're holding the FPU, and then the host can help to save
+ * the guest vcpu FP state if other threads require the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -658,12 +692,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -687,8 +717,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1194,6 +1222,7 @@ out:
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543..e73d513 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 000cf82..4549349 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list

[PATCH v2 1/2] powerpc/booke: Restrict SPE exception handlers to e200/e500 cores

2014-08-20 Thread Mihai Caraman
SPE exception handlers are now defined for 32-bit e500mc cores even though
SPE unit is not present and CONFIG_SPE is undefined.

Restrict SPE exception handlers to e200/e500 cores adding CONFIG_SPE_POSSIBLE
and consequently guard __setup_ivors and __setup_cpu functions.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
Cc: Scott Wood scottw...@freescale.com
Cc: Alexander Graf ag...@suse.de
---
v2:
 - use CONFIG_PPC_E500MC without CONFIG_E500
 - use elif defined()

 arch/powerpc/kernel/cpu_setup_fsl_booke.S | 12 +++-
 arch/powerpc/kernel/cputable.c|  5 +
 arch/powerpc/kernel/head_fsl_booke.S  | 18 +-
 arch/powerpc/platforms/Kconfig.cputype|  6 +-
 4 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S 
b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 4f1393d..dddba3e 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -91,6 +91,7 @@ _GLOBAL(setup_altivec_idle)
 
blr
 
+#ifdef CONFIG_PPC_E500MC
 _GLOBAL(__setup_cpu_e6500)
mflrr6
 #ifdef CONFIG_PPC64
@@ -107,14 +108,20 @@ _GLOBAL(__setup_cpu_e6500)
bl  __setup_cpu_e5500
mtlrr6
blr
+#endif /* CONFIG_PPC_E500MC */
 
 #ifdef CONFIG_PPC32
+#ifdef CONFIG_E200
 _GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
mfspr   r3,SPRN_HID0
ori r3,r3,HID0_DAPUEN@l
mtspr   SPRN_HID0,r3
b   __setup_e200_ivors
+#endif /* CONFIG_E200 */
+
+#ifdef CONFIG_E500
+#ifndef CONFIG_PPC_E500MC
 _GLOBAL(__setup_cpu_e500v1)
 _GLOBAL(__setup_cpu_e500v2)
mflrr4
@@ -129,6 +136,7 @@ _GLOBAL(__setup_cpu_e500v2)
 #endif
mtlrr4
blr
+#else /* CONFIG_PPC_E500MC */
 _GLOBAL(__setup_cpu_e500mc)
 _GLOBAL(__setup_cpu_e5500)
mflrr5
@@ -159,7 +167,9 @@ _GLOBAL(__setup_cpu_e5500)
 2:
mtlrr5
blr
-#endif
+#endif /* CONFIG_PPC_E500MC */
+#endif /* CONFIG_E500 */
+#endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 0c15764..df979c5f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2051,6 +2051,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif /* CONFIG_PPC32 */
 #ifdef CONFIG_E500
 #ifdef CONFIG_PPC32
+#ifndef CONFIG_PPC_E500MC
{   /* e500 */
.pvr_mask   = 0x,
.pvr_value  = 0x8020,
@@ -2090,6 +2091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500,
.platform   = ppc8548,
},
+#else
{   /* e500mc */
.pvr_mask   = 0x,
.pvr_value  = 0x8023,
@@ -2108,7 +2110,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500mc,
.platform   = ppce500mc,
},
+#endif /* CONFIG_PPC_E500MC */
 #endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC_E500MC
{   /* e5500 */
.pvr_mask   = 0x,
.pvr_value  = 0x8024,
@@ -2152,6 +2156,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500mc,
.platform   = ppce6500,
},
+#endif /* CONFIG_PPC_E500MC */
 #ifdef CONFIG_PPC32
{   /* default match */
.pvr_mask   = 0x,
diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
index b497188..90f487f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -613,6 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfspr   r10, SPRN_SPRG_RSCRATCH0
b   InstructionStorage
 
+/* Define SPE handlers for e200 and e500v2 */
 #ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
@@ -622,10 +623,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
b   fast_exception_return
 1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
  unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
+#endif /* CONFIG_SPE_POSSIBLE */
 
/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
@@ -635,12 +636,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  SPEFloatingPointRoundException, EXC_XFER_EE)
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2040

[PATCH v2 2/2] powerpc/booke: Revert SPE/AltiVec common defines for interrupt numbers

2014-08-20 Thread Mihai Caraman
Book3E specification defines shared interrupt numbers for SPE and AltiVec
units. Still SPE is present in e200/e500v2 cores while AltiVec is present in
e6500 core. So we can currently decide at compile-time which unit to support
exclusively. As Alexander Graf suggested, this will improve code readability
especially in KVM.

Use distinct defines to identify SPE/AltiVec interrupt numbers, reverting
c58ce397 and 6b310fc5 patches that added common defines.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
Cc: Scott Wood scottw...@freescale.com
Cc: Alexander Graf ag...@suse.de
---
 arch/powerpc/kernel/exceptions-64e.S | 4 ++--
 arch/powerpc/kernel/head_fsl_booke.S | 8 
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index bb9cac6..3e68d1c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -635,7 +635,7 @@ interrupt_end_book3e:
 
 /* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
-   NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
+   NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x200)
@@ -658,7 +658,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /* AltiVec Assist */
START_EXCEPTION(altivec_assist);
NORMAL_EXCEPTION_PROLOG(0x220,
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
+   BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
INTS_DISABLE
diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
index 90f487f..fffd1f9 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -617,27 +617,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 #ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
-   NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
+   NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
beq 1f
bl  load_up_spe
b   fast_exception_return
 1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #elif defined(CONFIG_SPE_POSSIBLE)
-   EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
+   EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
  unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE_POSSIBLE */
 
/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
-   EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+   EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
  SPEFloatingPointException, EXC_XFER_EE)
 
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  SPEFloatingPointRoundException, EXC_XFER_EE)
 #elif defined(CONFIG_SPE_POSSIBLE)
-   EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+   EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
  unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  unknown_exception, EXC_XFER_EE)
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 0/6] KVM: PPC: Book3e: AltiVec support

2014-08-20 Thread Mihai Caraman
Add KVM Book3e AltiVec support.

Changes:

v4:
 - use CONFIG_SPE_POSSIBLE and a new ifdef for CONFIG_ALTIVEC
 - remove SPE handlers from bookehv
 - split ONE_REG powerpc generic and ONE_REG AltiVec
 - add setters for IVPR, IVOR2 and IVOR8
 - add api documentation for ONE_REG IVPR and IVORs
 - don't enable e6500 core since hardware threads are not yet supported

v3:
 - use distinct SPE/AltiVec exception handlers
 - make ONE_REG AltiVec support powerpc generic
 - add ONE_REG IVORs support

 v2:
 - integrate Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue
   in January and take into account feedback

Mihai Caraman (6):
  KVM: PPC: Book3E: Increase FPU laziness
  KVM: PPC: Book3e: Add AltiVec support
  KVM: PPC: Make ONE_REG powerpc generic
  KVM: PPC: Move ONE_REG AltiVec support to powerpc
  KVM: PPC: Booke: Add setter functions for IVPR, IVOR2 and IVOR8
emulation
  KVM: PPC: Booke: Add ONE_REG support for IVPR and IVORs

 Documentation/virtual/kvm/api.txt |   7 +
 arch/powerpc/include/uapi/asm/kvm.h   |  30 +++
 arch/powerpc/kvm/book3s.c | 151 --
 arch/powerpc/kvm/booke.c  | 371 --
 arch/powerpc/kvm/booke.h  |  43 +---
 arch/powerpc/kvm/booke_emulate.c  |  15 +-
 arch/powerpc/kvm/bookehv_interrupts.S |   9 +-
 arch/powerpc/kvm/e500.c   |  42 +++-
 arch/powerpc/kvm/e500_emulate.c   |  20 ++
 arch/powerpc/kvm/e500mc.c |  18 +-
 arch/powerpc/kvm/powerpc.c|  97 +
 11 files changed, 576 insertions(+), 227 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 6/6] KVM: PPC: Booke: Add ONE_REG support for IVPR and IVORs

2014-08-20 Thread Mihai Caraman
Add ONE_REG support for IVPR and IVORs registers. Implement IVPR, IVORs 0-15
and 35 in booke common layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - add ONE_REG IVPR
 - use IVPR, IVOR2 and IVOR8 setters
 - add api documentation for ONE_REG IVPR and IVORs

v3:
 - new patch

 Documentation/virtual/kvm/api.txt   |   7 ++
 arch/powerpc/include/uapi/asm/kvm.h |  25 +++
 arch/powerpc/kvm/booke.c| 145 
 arch/powerpc/kvm/e500.c |  42 ++-
 arch/powerpc/kvm/e500mc.c   |  16 
 5 files changed, 233 insertions(+), 2 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt 
b/Documentation/virtual/kvm/api.txt
index beae3fd..cd7b171 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1917,6 +1917,13 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR   | 32
   PPC   | KVM_REG_PPC_TM_DSCR   | 64
   PPC   | KVM_REG_PPC_TM_TAR| 64
+  PPC   | KVM_REG_PPC_IVPR  | 64
+  PPC   | KVM_REG_PPC_IVOR0 | 32
+  ...
+  PPC   | KVM_REG_PPC_IVOR15| 32
+  PPC   | KVM_REG_PPC_IVOR32| 32
+  ...
+  PPC   | KVM_REG_PPC_IVOR37| 32
 |   |
   MIPS  | KVM_REG_MIPS_R0   | 64
   ...
diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index ab4d473..c97f119 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -564,6 +564,31 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_SPRG9  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 #define KVM_REG_PPC_DBSR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
 
+/* Booke IVPR & IVOR registers */
+#define KVM_REG_PPC_IVPR   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_IVOR0  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbd)
+#define KVM_REG_PPC_IVOR1  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbe)
+#define KVM_REG_PPC_IVOR2  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_IVOR3  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc0)
+#define KVM_REG_PPC_IVOR4  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc1)
+#define KVM_REG_PPC_IVOR5  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc2)
+#define KVM_REG_PPC_IVOR6  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc3)
+#define KVM_REG_PPC_IVOR7  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc4)
+#define KVM_REG_PPC_IVOR8  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc5)
+#define KVM_REG_PPC_IVOR9  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc6)
+#define KVM_REG_PPC_IVOR10 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc7)
+#define KVM_REG_PPC_IVOR11 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc8)
+#define KVM_REG_PPC_IVOR12 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc9)
+#define KVM_REG_PPC_IVOR13 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xca)
+#define KVM_REG_PPC_IVOR14 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcb)
+#define KVM_REG_PPC_IVOR15 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcc)
+#define KVM_REG_PPC_IVOR32 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcd)
+#define KVM_REG_PPC_IVOR33 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xce)
+#define KVM_REG_PPC_IVOR34 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcf)
+#define KVM_REG_PPC_IVOR35 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd0)
+#define KVM_REG_PPC_IVOR36 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd1)
+#define KVM_REG_PPC_IVOR37 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd2)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d4df648..1cb2a2a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1570,6 +1570,75 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
 
switch (id) {
+   case KVM_REG_PPC_IVPR:
+   *val = get_reg_val(id, vcpu-arch.ivpr);
+   break;
+   case KVM_REG_PPC_IVOR0:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR1:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+   break;
+   case KVM_REG_PPC_IVOR2:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR3:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR4:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+   break;
+   case KVM_REG_PPC_IVOR5:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+   break;
+   case

[PATCH v4 3/6] KVM: PPC: Make ONE_REG powerpc generic

2014-08-20 Thread Mihai Caraman
Make ONE_REG generic for server and embedded architectures by moving
kvm_vcpu_ioctl_get_one_reg() and kvm_vcpu_ioctl_set_one_reg() functions
to powerpc layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - split ONE_REG powerpc generic and ONE_REG AltiVec

v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/kvm/book3s.c  | 121 +++--
 arch/powerpc/kvm/booke.c   |  91 +-
 arch/powerpc/kvm/powerpc.c |  55 +
 3 files changed, 138 insertions(+), 129 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index dd03f6b..26868e2 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,33 +535,28 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, 
struct kvm_fpu *fpu)
return -ENOTSUPP;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+   union kvmppc_one_reg *val)
 {
-   int r;
-   union kvmppc_one_reg val;
-   int size;
+   int r = 0;
long int i;
 
-   size = one_reg_size(reg-id);
-   if (size  sizeof(val))
-   return -EINVAL;
-
-   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
+   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
-   switch (reg-id) {
+   switch (id) {
case KVM_REG_PPC_DAR:
-   val = get_reg_val(reg-id, kvmppc_get_dar(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
-   val = get_reg_val(reg-id, kvmppc_get_dsisr(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-   i = reg-id - KVM_REG_PPC_FPR0;
-   val = get_reg_val(reg-id, VCPU_FPR(vcpu, i));
+   i = id - KVM_REG_PPC_FPR0;
+   *val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
-   val = get_reg_val(reg-id, vcpu-arch.fp.fpscr);
+   *val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
 #ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -569,110 +564,94 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
-   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   val-vval = vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
-   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
+   *val = get_reg_val(id, vcpu-arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
-   val = get_reg_val(reg-id, vcpu-arch.vrsave);
+   *val = get_reg_val(id, vcpu-arch.vrsave);
break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
-   long int i = reg-id - KVM_REG_PPC_VSR0;
-   val.vsxval[0] = vcpu-arch.fp.fpr[i][0];
-   val.vsxval[1] = vcpu-arch.fp.fpr[i][1];
+   i = id - KVM_REG_PPC_VSR0;
+   val-vsxval[0] = vcpu-arch.fp.fpr[i][0];
+   val-vsxval[1] = vcpu-arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
 #endif /* CONFIG_VSX */
-   case KVM_REG_PPC_DEBUG_INST: {
-   u32 opcode = INS_TW;
-   r = copy_to_user((u32 __user *)(long)reg-addr,
-opcode, sizeof(u32));
+   case KVM_REG_PPC_DEBUG_INST:
+   *val = get_reg_val(id, INS_TW);
break;
-   }
 #ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
if (!vcpu-arch.icp) {
r = -ENXIO;
break

[PATCH v4 4/6] KVM: PPC: Move ONE_REG AltiVec support to powerpc

2014-08-20 Thread Mihai Caraman
Move ONE_REG AltiVec support to powerpc generic layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - split ONE_REG powerpc generic and ONE_REG AltiVec

v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |  5 +
 arch/powerpc/kvm/book3s.c   | 42 -
 arch/powerpc/kvm/powerpc.c  | 42 +
 3 files changed, 47 insertions(+), 42 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 3ca357a..ab4d473 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accessed via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 26868e2..1b5adda 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -558,25 +558,6 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_FPSCR:
*val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val-vval = vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0];
-   break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   *val = get_reg_val(id, vcpu-arch.vr.vscr.u[3]);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   *val = get_reg_val(id, vcpu-arch.vrsave);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -653,29 +634,6 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_FPSCR:
vcpu-arch.fp.fpscr = set_reg_val(id, *val);
break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vr.vr[id - KVM_REG_PPC_VR0] = val-vval;
-   break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vr.vscr.u[3] = set_reg_val(id, *val);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   vcpu-arch.vrsave = set_reg_val(id, *val);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1326116..19d4755 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -941,6 +941,25 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
if (r == -EINVAL) {
r = 0;
switch (reg-id) {
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3

[PATCH v4 2/6] KVM: PPC: Book3e: Add AltiVec support

2014-08-20 Thread Mihai Caraman
Add AltiVec support in KVM for Book3e. FPU support gracefully reuse host
infrastructure so follow the same approach for AltiVec.

Book3e specification defines shared interrupt numbers for SPE and AltiVec
units. Still SPE is present in e200/e500v2 cores while AltiVec is present in
e6500 core. So we can currently decide at compile-time which of the SPE or
AltiVec units to support exclusively by using CONFIG_SPE_POSSIBLE and
CONFIG_PPC_E500MC defines. As Alexander Graf suggested, keep SPE and AltiVec
exception handlers distinct to improve code readability.

Guests have the privilege to enable AltiVec, so we always need to support
AltiVec in KVM and implicitly in host to reflect interrupts and to save/restore
the unit context. KVM will be loaded on cores with AltiVec unit only if
CONFIG_ALTIVEC is defined. Use this define to guard KVM AltiVec logic.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - use CONFIG_SPE_POSSIBLE and a new ifdef for CONFIG_ALTIVEC
 - remove SPE handlers from bookehv
 - update commit message

v3:
 - use distinct SPE/AltiVec exception handlers

v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c  | 74 ++-
 arch/powerpc/kvm/booke.h  |  6 +++
 arch/powerpc/kvm/bookehv_interrupts.S |  9 +
 arch/powerpc/kvm/e500_emulate.c   | 20 ++
 4 files changed, 101 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 91e7217..8ace612 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -168,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -375,9 +409,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_SPE_POSSIBLE
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#endif
+#ifdef CONFIG_ALTIVEC
+   case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -697,6 +737,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.dbg_reg;
switch_booke_debug_regs(debug);
@@ -719,6 +770,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1025,7 +1080,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
case BOOKE_INTERRUPT_SPE_UNAVAIL:
/*
 * Guest wants SPE, but host kernel doesn't support it.  Send
@@ -1046,6 +1101,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
+#endif /* CONFIG_SPE_POSSIBLE

[PATCH v4 1/6] KVM: PPC: Book3E: Increase FPU laziness

2014-08-20 Thread Mihai Caraman
Increase FPU laziness by loading the guest state into the unit before entering
the guest instead of doing it on each vcpu schedule. Without this improvement
an interrupt may claim the floating point unit, corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - update commit message

v3:
 - no changes

v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 074b7fc..91e7217 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we're holding the FPU, and then the host can help to save
+ * guest vcpu FP state if other threads need to use the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -658,12 +692,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -687,8 +717,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1194,6 +1222,7 @@ out:
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543..e73d513 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 000cf82..4549349 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list

[PATCH v4 5/6] KVM: PPC: Booke: Add setter functions for IVPR, IVOR2 and IVOR8 emulation

2014-08-20 Thread Mihai Caraman
Add setter functions for IVPR, IVOR2 and IVOR8 emulation in preparation
for ONE_REG support.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - new patch
 - add api documentation for ONE_REG IVPR and IVORs

 arch/powerpc/kvm/booke.c | 24 
 arch/powerpc/kvm/booke.h |  3 +++
 arch/powerpc/kvm/booke_emulate.c | 15 +++
 3 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 831c1b4..d4df648 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1782,6 +1782,30 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 
tsr_bits)
update_timer_ints(vcpu);
 }
 
+void kvmppc_set_ivpr(struct kvm_vcpu *vcpu, ulong new_ivpr)
+{
+   vcpu-arch.ivpr = new_ivpr;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVPR, new_ivpr);
+#endif
+}
+
+void kvmppc_set_ivor2(struct kvm_vcpu *vcpu, u32 new_ivor)
+{
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = new_ivor;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVOR2, new_ivor);
+#endif
+}
+
+void kvmppc_set_ivor8(struct kvm_vcpu *vcpu, u32 new_ivor)
+{
+   vcpu-arch.ivor[BOOKE_IRQPRIO_SYSCALL] = new_ivor;
+#ifdef CONFIG_KVM_BOOKE_HV
+   mtspr(SPRN_GIVOR8, new_ivor);
+#endif
+}
+
 void kvmppc_decrementer_func(unsigned long data)
 {
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 22ba08e..0242530 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -80,6 +80,9 @@ void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+void kvmppc_set_ivpr(struct kvm_vcpu *vcpu, ulong new_ivpr);
+void kvmppc_set_ivor2(struct kvm_vcpu *vcpu, u32 new_ivor);
+void kvmppc_set_ivor8(struct kvm_vcpu *vcpu, u32 new_ivor);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 unsigned int inst, int *advance);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 92bc668..94c64e3 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -191,10 +191,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
break;
 
case SPRN_IVPR:
-   vcpu-arch.ivpr = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVPR, spr_val);
-#endif
+   kvmppc_set_ivpr(vcpu, spr_val);
break;
case SPRN_IVOR0:
vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -203,10 +200,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
break;
case SPRN_IVOR2:
-   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVOR2, spr_val);
-#endif
+   kvmppc_set_ivor2(vcpu, spr_val);
break;
case SPRN_IVOR3:
vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -224,10 +218,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int 
sprn, ulong spr_val)
vcpu-arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR8:
-   vcpu-arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
-#ifdef CONFIG_KVM_BOOKE_HV
-   mtspr(SPRN_GIVOR8, spr_val);
-#endif
+   kvmppc_set_ivor8(vcpu, spr_val);
break;
case SPRN_IVOR9:
vcpu-arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html



[PATCH] KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core

2014-08-06 Thread Mihai Caraman
ePAPR represents hardware threads as cpu node properties in device tree.
So with existing QEMU, hardware threads are simply exposed as vcpus with
one hardware thread.

The e6500 core shares TLBs between hardware threads. Without tlb write
conditional instruction, the Linux kernel uses per core mechanisms to
protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use a
TLB protection mechanism. An alternative solution is to use the hypervisor
to allocate different lpids to the guest's vcpus running simultaneously on real
sibling threads. This patch moves lpid to vcpu level and allocates a pool
of lpids (equal to the number of threads per core) per VM.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 Please rebase this patch before
[PATCH v3 5/5] KVM: PPC: Book3E: Enable e6500 core
 to proper handle SMP guests.

 arch/powerpc/include/asm/kvm_host.h |  5 
 arch/powerpc/kernel/asm-offsets.c   |  4 +++
 arch/powerpc/kvm/e500_mmu_host.c| 15 +-
 arch/powerpc/kvm/e500mc.c   | 55 +
 4 files changed, 55 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 98d9dd5..1b0bb4a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -227,7 +227,11 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOKE_HV
+   unsigned int lpid_pool[2];
+#else
unsigned int lpid;
+#endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long hpt_virt;
struct revmap_entry *revmap;
@@ -435,6 +439,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u32 lpid;
 #endif
 
 #if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index ab9ae04..5a30b87 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -483,7 +483,11 @@ int main(void)
DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
 
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+#ifdef CONFIG_KVM_BOOKE_HV
+   DEFINE(KVM_LPID, offsetof(struct kvm_vcpu, arch.lpid));
+#else
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+#endif
 
/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 4150826..a233cc6 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int 
usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-uint32_t mas0)
+uint32_t mas0, uint32_t *lpid)
 {
unsigned long flags;
 
@@ -80,6 +80,8 @@ static inline void __write_host_tlbe(struct 
kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe-mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe-mas7_3  32));
 #ifdef CONFIG_KVM_BOOKE_HV
+   /* populate mas8 with latest LPID */
+   stlbe-mas8 = MAS8_TGS | *lpid;
mtspr(SPRN_MAS8, stlbe-mas8);
 #endif
asm volatile(isync; tlbwe : : : memory);
@@ -129,11 +131,12 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe-mas2);
-   __write_host_tlbe(stlbe, mas0);
+   __write_host_tlbe(stlbe, mas0, vcpu_e500-vcpu.arch.lpid);
} else {
__write_host_tlbe(stlbe,
  MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500-vcpu.arch.lpid);
}
 }
 
@@ -318,9 +321,7 @@ static void kvmppc_e500_setup_stlbe(
stlbe-mas7_3 = ((u64)pfn  PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe-mas7_3, pr);
 
-#ifdef CONFIG_KVM_BOOKE_HV
-   stlbe-mas8 = MAS8_TGS | vcpu-kvm-arch.lpid;
-#endif
+   /* Set mas8 when executing tlbwe since LPID can change dynamically */
 }
 
 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -632,7 +633,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum 
instruction_type type,
 
local_irq_save(flags);
mtspr(SPRN_MAS6, (vcpu-arch.pid  MAS6_SPID_SHIFT) | addr_space);
-   mtspr(SPRN_MAS5, MAS5_SGS | vcpu-kvm-arch.lpid);
+   mtspr(SPRN_MAS5, MAS5_SGS | vcpu-arch.lpid);
asm volatile(tlbsx 0, %[geaddr]\n : :
 [geaddr] r (geaddr));
mtspr(SPRN_MAS5, 0);
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index aa48dc3..c0a0d9d 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -24,6

[PATCH 2/2] powerpc/booke: Revert SPE/AltiVec common defines for interrupt numbers

2014-08-06 Thread Mihai Caraman
Though SPE/AltiVec share interrupt numbers on BookE cores, use distinct
defines to identify these numbers. This improves code readability, especially
in KVM.

Revert c58ce397 and 6b310fc5 patches that added common defines.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kernel/exceptions-64e.S | 4 ++--
 arch/powerpc/kernel/head_fsl_booke.S | 8 
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index bb9cac6..3e68d1c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -635,7 +635,7 @@ interrupt_end_book3e:
 
 /* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
-   NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
+   NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x200)
@@ -658,7 +658,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /* AltiVec Assist */
START_EXCEPTION(altivec_assist);
NORMAL_EXCEPTION_PROLOG(0x220,
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
+   BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
INTS_DISABLE
diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
index 4f8930f..7ac2dbb 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -617,27 +617,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 #ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
-   NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
+   NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
beq 1f
bl  load_up_spe
b   fast_exception_return
 1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #elif CONFIG_SPE_POSSIBLE
-   EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
+   EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
  unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE_POSSIBLE */
 
/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
-   EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+   EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
  SPEFloatingPointException, EXC_XFER_EE)
 
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  SPEFloatingPointRoundException, EXC_XFER_EE)
 #elif CONFIG_SPE_POSSIBLE
-   EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
+   EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
  unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  unknown_exception, EXC_XFER_EE)
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/2] powerpc/booke: Restrict SPE exception handlers to e200/e500 cores

2014-08-06 Thread Mihai Caraman
SPE exception handlers are now defined for 32-bit e500mc cores even though
the SPE unit is not present and CONFIG_SPE is undefined.

Restrict SPE exception handlers to e200/e500 cores by adding CONFIG_SPE_POSSIBLE
and consequently guard the __setup_ivors and __setup_cpu functions.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kernel/cpu_setup_fsl_booke.S | 12 +++-
 arch/powerpc/kernel/cputable.c|  5 +
 arch/powerpc/kernel/head_fsl_booke.S  | 18 +-
 arch/powerpc/platforms/Kconfig.cputype|  6 +-
 4 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S 
b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 4f1393d..44bb2c9 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -91,6 +91,7 @@ _GLOBAL(setup_altivec_idle)
 
blr
 
+#if defined(CONFIG_E500)  defined(CONFIG_PPC_E500MC)
 _GLOBAL(__setup_cpu_e6500)
mflrr6
 #ifdef CONFIG_PPC64
@@ -107,14 +108,20 @@ _GLOBAL(__setup_cpu_e6500)
bl  __setup_cpu_e5500
mtlrr6
blr
+#endif /* CONFIG_E500  CONFIG_PPC_E500MC */
 
 #ifdef CONFIG_PPC32
+#ifdef CONFIG_E200
 _GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
mfspr   r3,SPRN_HID0
ori r3,r3,HID0_DAPUEN@l
mtspr   SPRN_HID0,r3
b   __setup_e200_ivors
+#endif /* CONFIG_E200 */
+
+#ifdef CONFIG_E500
+#ifndef CONFIG_PPC_E500MC
 _GLOBAL(__setup_cpu_e500v1)
 _GLOBAL(__setup_cpu_e500v2)
mflrr4
@@ -129,6 +136,7 @@ _GLOBAL(__setup_cpu_e500v2)
 #endif
mtlrr4
blr
+#else /* CONFIG_PPC_E500MC */
 _GLOBAL(__setup_cpu_e500mc)
 _GLOBAL(__setup_cpu_e5500)
mflrr5
@@ -159,7 +167,9 @@ _GLOBAL(__setup_cpu_e5500)
 2:
mtlrr5
blr
-#endif
+#endif /* CONFIG_PPC_E500MC */
+#endif /* CONFIG_E500 */
+#endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b..c98719f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2031,6 +2031,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif /* CONFIG_PPC32 */
 #ifdef CONFIG_E500
 #ifdef CONFIG_PPC32
+#ifndef CONFIG_PPC_E500MC
{   /* e500 */
.pvr_mask   = 0x,
.pvr_value  = 0x8020,
@@ -2070,6 +2071,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500,
.platform   = ppc8548,
},
+#else
{   /* e500mc */
.pvr_mask   = 0x,
.pvr_value  = 0x8023,
@@ -2088,7 +2090,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500mc,
.platform   = ppce500mc,
},
+#endif /* CONFIG_PPC_E500MC */
 #endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC_E500MC
{   /* e5500 */
.pvr_mask   = 0x,
.pvr_value  = 0x8024,
@@ -2132,6 +2136,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check  = machine_check_e500mc,
.platform   = ppce6500,
},
+#endif /* CONFIG_PPC_E500MC */
 #ifdef CONFIG_PPC32
{   /* default match */
.pvr_mask   = 0x,
diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
index b497188..4f8930f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -613,6 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfspr   r10, SPRN_SPRG_RSCRATCH0
b   InstructionStorage
 
+/* Define SPE handlers for e200 and e500v2 */
 #ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
@@ -622,10 +623,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
b   fast_exception_return
 1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
-#else
+#elif CONFIG_SPE_POSSIBLE
EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
  unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
+#endif /* CONFIG_SPE_POSSIBLE */
 
/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
@@ -635,12 +636,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
  SPEFloatingPointRoundException, EXC_XFER_EE)
-#else
+#elif CONFIG_SPE_POSSIBLE
EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
  unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPE_FP_ROUND

[PATCH] KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core

2014-08-06 Thread Mihai Caraman
ePAPR represents hardware threads as cpu node properties in device tree.
So with existing QEMU, hardware threads are simply exposed as vcpus with
one hardware thread.

The e6500 core shares TLBs between hardware threads. Without tlb write
conditional instruction, the Linux kernel uses per core mechanisms to
protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use a
TLB protection mechanism. An alternative solution is to use the hypervisor
to allocate different lpids to the guest's vcpus running simultaneously on real
sibling threads. This patch moves lpid to vcpu level and allocates a pool
of lpids (equal to the number of threads per core) per VM.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 Please rebase this patch before
[PATCH v3 5/5] KVM: PPC: Book3E: Enable e6500 core
 to properly handle SMP guests.

 arch/powerpc/include/asm/kvm_host.h |  5 
 arch/powerpc/kernel/asm-offsets.c   |  4 +++
 arch/powerpc/kvm/e500_mmu_host.c| 15 +-
 arch/powerpc/kvm/e500mc.c   | 55 +
 4 files changed, 55 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 98d9dd5..1b0bb4a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -227,7 +227,11 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOKE_HV
+   unsigned int lpid_pool[2];
+#else
unsigned int lpid;
+#endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long hpt_virt;
struct revmap_entry *revmap;
@@ -435,6 +439,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u32 lpid;
 #endif
 
 #if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index ab9ae04..5a30b87 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -483,7 +483,11 @@ int main(void)
DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
 
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+#ifdef CONFIG_KVM_BOOKE_HV
+   DEFINE(KVM_LPID, offsetof(struct kvm_vcpu, arch.lpid));
+#else
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+#endif
 
/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 4150826..a233cc6 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int 
usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-uint32_t mas0)
+uint32_t mas0, uint32_t *lpid)
 {
unsigned long flags;
 
@@ -80,6 +80,8 @@ static inline void __write_host_tlbe(struct 
kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe-mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe-mas7_3  32));
 #ifdef CONFIG_KVM_BOOKE_HV
+   /* populate mas8 with latest LPID */
+   stlbe-mas8 = MAS8_TGS | *lpid;
mtspr(SPRN_MAS8, stlbe-mas8);
 #endif
asm volatile(isync; tlbwe : : : memory);
@@ -129,11 +131,12 @@ static inline void write_host_tlbe(struct 
kvmppc_vcpu_e500 *vcpu_e500,
 
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe-mas2);
-   __write_host_tlbe(stlbe, mas0);
+   __write_host_tlbe(stlbe, mas0, vcpu_e500-vcpu.arch.lpid);
} else {
__write_host_tlbe(stlbe,
  MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500-vcpu.arch.lpid);
}
 }
 
@@ -318,9 +321,7 @@ static void kvmppc_e500_setup_stlbe(
stlbe-mas7_3 = ((u64)pfn  PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe-mas7_3, pr);
 
-#ifdef CONFIG_KVM_BOOKE_HV
-   stlbe-mas8 = MAS8_TGS | vcpu-kvm-arch.lpid;
-#endif
+   /* Set mas8 when executing tlbwe since LPID can change dynamically */
 }
 
 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -632,7 +633,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum 
instruction_type type,
 
local_irq_save(flags);
mtspr(SPRN_MAS6, (vcpu-arch.pid  MAS6_SPID_SHIFT) | addr_space);
-   mtspr(SPRN_MAS5, MAS5_SGS | vcpu-kvm-arch.lpid);
+   mtspr(SPRN_MAS5, MAS5_SGS | vcpu-arch.lpid);
asm volatile(tlbsx 0, %[geaddr]\n : :
 [geaddr] r (geaddr));
mtspr(SPRN_MAS5, 0);
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index aa48dc3..c0a0d9d 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -24,6

[PATCH v3 0/5] KVM: PPC: Book3e: AltiVec support

2014-08-05 Thread Mihai Caraman
Add KVM Book3e AltiVec support and enable e6500 core.

Changes:

v3:
 - use distinct SPE/AltiVec exception handlers
 - make ONE_REG AltiVec support powerpc generic
 - add ONE_REG IVORs support

v2:
 - integrate Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue
   in January and take into account feedback

Mihai Caraman (5):
  KVM: PPC: Book3e: Increase FPU laziness
  KVM: PPC: Book3e: Add AltiVec support
  KVM: PPC: Move ONE_REG AltiVec support to powerpc
  KVM: PPC: Booke: Add ONE_REG IVORs support
  KVM: PPC: Book3e: Enable e6500 core

 arch/powerpc/include/uapi/asm/kvm.h   |  29 +++
 arch/powerpc/kvm/book3s.c | 151 +---
 arch/powerpc/kvm/booke.c  | 331 --
 arch/powerpc/kvm/booke.h  |  39 +---
 arch/powerpc/kvm/bookehv_interrupts.S |  10 +-
 arch/powerpc/kvm/e500.c   |  42 -
 arch/powerpc/kvm/e500_emulate.c   |  18 ++
 arch/powerpc/kvm/e500mc.c |  44 -
 arch/powerpc/kvm/powerpc.c|  97 ++
 9 files changed, 554 insertions(+), 207 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 2/5] KVM: PPC: Book3e: Add AltiVec support

2014-08-05 Thread Mihai Caraman
Add KVM Book3e AltiVec support. KVM Book3e FPU support gracefully reuses host
infrastructure, so follow the same approach for AltiVec.

Keep SPE/AltiVec exception handlers distinct using CONFIG_KVM_E500V2.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - use distinct SPE/AltiVec exception handlers

v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c  | 73 +++
 arch/powerpc/kvm/booke.h  |  5 +++
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +++--
 arch/powerpc/kvm/e500_emulate.c   | 18 +
 4 files changed, 102 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0c6f616..c5cca09 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -168,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -375,9 +409,14 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_KVM_E500V2
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#else
+   case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -693,6 +732,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.shadow_dbg_reg;
switch_booke_debug_regs(debug);
@@ -715,6 +765,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -999,6 +1053,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
 
+#ifdef CONFIG_KVM_E500V2
 #ifdef CONFIG_SPE
case BOOKE_INTERRUPT_SPE_UNAVAIL: {
if (vcpu-arch.shared-msr  MSR_SPE)
@@ -1040,7 +1095,24 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
+#endif /* !CONFIG_SPE */
+#else
+/*
+ * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
+ * see kvmppc_core_check_processor_compat().
+ */
+#ifdef CONFIG_ALTIVEC
+   case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
+   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+   r = RESUME_GUEST;
+   break;
+
+   case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
+   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
+   r = RESUME_GUEST;
+   break;
 #endif
+#endif /* !CONFIG_KVM_E500V2 */
 
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, vcpu-arch.fault_dear,
@@ -1217,6 +1289,7 @@ out:
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu

[PATCH v3 3/5] KVM: PPC: Move ONE_REG AltiVec support to powerpc

2014-08-05 Thread Mihai Caraman
Make ONE_REG AltiVec support common across server and embedded implementations
moving kvm_vcpu_ioctl_get_one_reg() and kvm_vcpu_ioctl_set_one_reg() functions
to powerpc layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |   5 ++
 arch/powerpc/kvm/book3s.c   | 151 +++-
 arch/powerpc/kvm/booke.c|  85 
 arch/powerpc/kvm/powerpc.c  |  97 +++
 4 files changed, 179 insertions(+), 159 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index e0e49db..7a27ff0 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accesses via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index dd03f6b..1b5adda 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,174 +535,111 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, 
struct kvm_fpu *fpu)
return -ENOTSUPP;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+   union kvmppc_one_reg *val)
 {
-   int r;
-   union kvmppc_one_reg val;
-   int size;
+   int r = 0;
long int i;
 
-   size = one_reg_size(reg-id);
-   if (size  sizeof(val))
-   return -EINVAL;
-
-   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
+   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
-   switch (reg-id) {
+   switch (id) {
case KVM_REG_PPC_DAR:
-   val = get_reg_val(reg-id, kvmppc_get_dar(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
-   val = get_reg_val(reg-id, kvmppc_get_dsisr(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-   i = reg-id - KVM_REG_PPC_FPR0;
-   val = get_reg_val(reg-id, VCPU_FPR(vcpu, i));
+   i = id - KVM_REG_PPC_FPR0;
+   *val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
-   val = get_reg_val(reg-id, vcpu-arch.fp.fpscr);
-   break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   *val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   val = get_reg_val(reg-id, vcpu-arch.vrsave);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
-   long int i = reg-id - KVM_REG_PPC_VSR0;
-   val.vsxval[0] = vcpu-arch.fp.fpr[i][0];
-   val.vsxval[1] = vcpu-arch.fp.fpr[i][1];
+   i = id - KVM_REG_PPC_VSR0;
+   val-vsxval[0] = vcpu-arch.fp.fpr[i][0];
+   val-vsxval[1] = vcpu-arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
 #endif /* CONFIG_VSX */
-   case KVM_REG_PPC_DEBUG_INST: {
-   u32 opcode = INS_TW;
-   r = copy_to_user

[PATCH v3 5/5] KVM: PPC: Book3E: Enable e6500 core

2014-08-05 Thread Mihai Caraman
Now that AltiVec support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2-v3:
 - no changes

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19dd927..aa48dc3 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,6 +177,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the priviledge to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 4/5] KVM: PPC: Booke: Add ONE_REG IVORs support

2014-08-05 Thread Mihai Caraman
Add ONE_REG IVORs support, with IVORs 0-15 and 35 booke common.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - new patch

 arch/powerpc/include/uapi/asm/kvm.h |  24 +++
 arch/powerpc/kvm/booke.c| 132 
 arch/powerpc/kvm/e500.c |  42 +++-
 arch/powerpc/kvm/e500mc.c   |  32 +
 4 files changed, 228 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 7a27ff0..174fed0 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -563,6 +563,30 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_WORT   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
 #define KVM_REG_PPC_SPRG9  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 
+/* Booke IVOR registers */
+#define KVM_REG_PPC_IVOR0  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc0)
+#define KVM_REG_PPC_IVOR1  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc1)
+#define KVM_REG_PPC_IVOR2  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc2)
+#define KVM_REG_PPC_IVOR3  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc3)
+#define KVM_REG_PPC_IVOR4  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc4)
+#define KVM_REG_PPC_IVOR5  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc5)
+#define KVM_REG_PPC_IVOR6  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc6)
+#define KVM_REG_PPC_IVOR7  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc7)
+#define KVM_REG_PPC_IVOR8  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc8)
+#define KVM_REG_PPC_IVOR9  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc9)
+#define KVM_REG_PPC_IVOR10 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xca)
+#define KVM_REG_PPC_IVOR11 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcb)
+#define KVM_REG_PPC_IVOR12 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcc)
+#define KVM_REG_PPC_IVOR13 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcd)
+#define KVM_REG_PPC_IVOR14 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xce)
+#define KVM_REG_PPC_IVOR15 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcf)
+#define KVM_REG_PPC_IVOR32 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd0)
+#define KVM_REG_PPC_IVOR33 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd1)
+#define KVM_REG_PPC_IVOR34 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd2)
+#define KVM_REG_PPC_IVOR35 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd3)
+#define KVM_REG_PPC_IVOR36 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd4)
+#define KVM_REG_PPC_IVOR37 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd5)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4fe7f68..ffa82a5 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1565,6 +1565,72 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
 
switch (id) {
+   case KVM_REG_PPC_IVOR0:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR1:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+   break;
+   case KVM_REG_PPC_IVOR2:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR3:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR4:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+   break;
+   case KVM_REG_PPC_IVOR5:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+   break;
+   case KVM_REG_PPC_IVOR6:
+   *val = get_reg_val(id, vcpu-arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+   break;
+   case KVM_REG_PPC_IVOR7:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+   break;
+   case KVM_REG_PPC_IVOR8:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR9:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+   break;
+   case KVM_REG_PPC_IVOR10:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+   break;
+   case KVM_REG_PPC_IVOR11:
+   *val = get_reg_val(id, vcpu-arch.ivor[BOOKE_IRQPRIO_FIT]);
+   break;
+   case KVM_REG_PPC_IVOR12:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+   break;
+   case KVM_REG_PPC_IVOR13

[PATCH v3 1/5] KVM: PPC: Book3e: Increase FPU laziness

2014-08-05 Thread Mihai Caraman
Increase FPU laziness by calling kvmppc_load_guest_fp() just before
returning to the guest instead of on each sched-in. Without this improvement
an interrupt may also claim the floating point unit, corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - no changes

v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index b4c89fa..0c6f616 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -654,12 +688,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -683,8 +713,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1188,6 +1216,7 @@ out:
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543..e73d513 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 164bad2..67c06eb 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body

[PATCH v3 0/5] KVM: PPC: Book3e: AltiVec support

2014-08-05 Thread Mihai Caraman
Add KVM Book3e AltiVec support and enable e6500 core.

Changes:

v3:
 - use distinct SPE/AltiVec exception handlers
 - make ONE_REG AltiVec support powerpc generic
 - add ONE_REG IVORs support

v2:
 - integrate Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue
   in January and take into account feedback

Mihai Caraman (5):
  KVM: PPC: Book3e: Increase FPU laziness
  KVM: PPC: Book3e: Add AltiVec support
  KVM: PPC: Move ONE_REG AltiVec support to powerpc
  KVM: PPC: Booke: Add ONE_REG IVORs support
  KVM: PPC: Book3e: Enable e6500 core

 arch/powerpc/include/uapi/asm/kvm.h   |  29 +++
 arch/powerpc/kvm/book3s.c | 151 +---
 arch/powerpc/kvm/booke.c  | 331 --
 arch/powerpc/kvm/booke.h  |  39 +---
 arch/powerpc/kvm/bookehv_interrupts.S |  10 +-
 arch/powerpc/kvm/e500.c   |  42 -
 arch/powerpc/kvm/e500_emulate.c   |  18 ++
 arch/powerpc/kvm/e500mc.c |  44 -
 arch/powerpc/kvm/powerpc.c|  97 ++
 9 files changed, 554 insertions(+), 207 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 2/5] KVM: PPC: Book3e: Add AltiVec support

2014-08-05 Thread Mihai Caraman
Add KVM Book3e AltiVec support. KVM Book3e FPU support gracefully reuses host
infrastructure, so follow the same approach for AltiVec.

Keep SPE/AltiVec exception handlers distinct using CONFIG_KVM_E500V2.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - use distinct SPE/AltiVec exception handlers

v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c  | 73 +++
 arch/powerpc/kvm/booke.h  |  5 +++
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +++--
 arch/powerpc/kvm/e500_emulate.c   | 18 +
 4 files changed, 102 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0c6f616..c5cca09 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -168,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -375,9 +409,14 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_KVM_E500V2
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#else
+   case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -693,6 +732,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.shadow_dbg_reg;
switch_booke_debug_regs(debug);
@@ -715,6 +765,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -999,6 +1053,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
 
+#ifdef CONFIG_KVM_E500V2
 #ifdef CONFIG_SPE
case BOOKE_INTERRUPT_SPE_UNAVAIL: {
if (vcpu-arch.shared-msr  MSR_SPE)
@@ -1040,7 +1095,24 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
+#endif /* !CONFIG_SPE */
+#else
+/*
+ * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
+ * see kvmppc_core_check_processor_compat().
+ */
+#ifdef CONFIG_ALTIVEC
+   case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
+   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+   r = RESUME_GUEST;
+   break;
+
+   case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
+   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
+   r = RESUME_GUEST;
+   break;
 #endif
+#endif /* !CONFIG_KVM_E500V2 */
 
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, vcpu-arch.fault_dear,
@@ -1217,6 +1289,7 @@ out:
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu

[PATCH v3 3/5] KVM: PPC: Move ONE_REG AltiVec support to powerpc

2014-08-05 Thread Mihai Caraman
Make ONE_REG AltiVec support common across server and embedded implementations
moving kvm_vcpu_ioctl_get_one_reg() and kvm_vcpu_ioctl_set_one_reg() functions
to powerpc layer.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - make ONE_REG AltiVec support powerpc generic

v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |   5 ++
 arch/powerpc/kvm/book3s.c   | 151 +++-
 arch/powerpc/kvm/booke.c|  85 
 arch/powerpc/kvm/powerpc.c  |  97 +++
 4 files changed, 179 insertions(+), 159 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index e0e49db..7a27ff0 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accesses via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index dd03f6b..1b5adda 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,174 +535,111 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, 
struct kvm_fpu *fpu)
return -ENOTSUPP;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+   union kvmppc_one_reg *val)
 {
-   int r;
-   union kvmppc_one_reg val;
-   int size;
+   int r = 0;
long int i;
 
-   size = one_reg_size(reg-id);
-   if (size  sizeof(val))
-   return -EINVAL;
-
-   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
+   r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
-   switch (reg-id) {
+   switch (id) {
case KVM_REG_PPC_DAR:
-   val = get_reg_val(reg-id, kvmppc_get_dar(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
-   val = get_reg_val(reg-id, kvmppc_get_dsisr(vcpu));
+   *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-   i = reg-id - KVM_REG_PPC_FPR0;
-   val = get_reg_val(reg-id, VCPU_FPR(vcpu, i));
+   i = id - KVM_REG_PPC_FPR0;
+   *val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
-   val = get_reg_val(reg-id, vcpu-arch.fp.fpscr);
-   break;
-#ifdef CONFIG_ALTIVEC
-   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   *val = get_reg_val(id, vcpu-arch.fp.fpscr);
break;
-   case KVM_REG_PPC_VSCR:
-   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-   r = -ENXIO;
-   break;
-   }
-   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
-   break;
-   case KVM_REG_PPC_VRSAVE:
-   val = get_reg_val(reg-id, vcpu-arch.vrsave);
-   break;
-#endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
-   long int i = reg-id - KVM_REG_PPC_VSR0;
-   val.vsxval[0] = vcpu-arch.fp.fpr[i][0];
-   val.vsxval[1] = vcpu-arch.fp.fpr[i][1];
+   i = id - KVM_REG_PPC_VSR0;
+   val-vsxval[0] = vcpu-arch.fp.fpr[i][0];
+   val-vsxval[1] = vcpu-arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
 #endif /* CONFIG_VSX */
-   case KVM_REG_PPC_DEBUG_INST: {
-   u32 opcode = INS_TW;
-   r = copy_to_user

[PATCH v3 5/5] KVM: PPC: Book3E: Enable e6500 core

2014-08-05 Thread Mihai Caraman
Now that AltiVec support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2-v3:
 - no changes

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19dd927..aa48dc3 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,6 +177,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the priviledge to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 4/5] KVM: PPC: Booke: Add ONE_REG IVORs support

2014-08-05 Thread Mihai Caraman
Add ONE_REG IVORs support, with IVORs 0-15 and 35 booke common.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - new patch

 arch/powerpc/include/uapi/asm/kvm.h |  24 +++
 arch/powerpc/kvm/booke.c| 132 
 arch/powerpc/kvm/e500.c |  42 +++-
 arch/powerpc/kvm/e500mc.c   |  32 +
 4 files changed, 228 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 7a27ff0..174fed0 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -563,6 +563,30 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_WORT   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
 #define KVM_REG_PPC_SPRG9  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 
+/* Booke IVOR registers */
+#define KVM_REG_PPC_IVOR0  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc0)
+#define KVM_REG_PPC_IVOR1  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc1)
+#define KVM_REG_PPC_IVOR2  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc2)
+#define KVM_REG_PPC_IVOR3  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc3)
+#define KVM_REG_PPC_IVOR4  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc4)
+#define KVM_REG_PPC_IVOR5  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc5)
+#define KVM_REG_PPC_IVOR6  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc6)
+#define KVM_REG_PPC_IVOR7  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc7)
+#define KVM_REG_PPC_IVOR8  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc8)
+#define KVM_REG_PPC_IVOR9  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xc9)
+#define KVM_REG_PPC_IVOR10 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xca)
+#define KVM_REG_PPC_IVOR11 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcb)
+#define KVM_REG_PPC_IVOR12 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcc)
+#define KVM_REG_PPC_IVOR13 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcd)
+#define KVM_REG_PPC_IVOR14 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xce)
+#define KVM_REG_PPC_IVOR15 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xcf)
+#define KVM_REG_PPC_IVOR32 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd0)
+#define KVM_REG_PPC_IVOR33 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd1)
+#define KVM_REG_PPC_IVOR34 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd2)
+#define KVM_REG_PPC_IVOR35 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd3)
+#define KVM_REG_PPC_IVOR36 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd4)
+#define KVM_REG_PPC_IVOR37 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd5)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4fe7f68..ffa82a5 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1565,6 +1565,72 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
 
switch (id) {
+   case KVM_REG_PPC_IVOR0:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR1:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+   break;
+   case KVM_REG_PPC_IVOR2:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR3:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+   break;
+   case KVM_REG_PPC_IVOR4:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+   break;
+   case KVM_REG_PPC_IVOR5:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+   break;
+   case KVM_REG_PPC_IVOR6:
+   *val = get_reg_val(id, vcpu-arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+   break;
+   case KVM_REG_PPC_IVOR7:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+   break;
+   case KVM_REG_PPC_IVOR8:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+   break;
+   case KVM_REG_PPC_IVOR9:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+   break;
+   case KVM_REG_PPC_IVOR10:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+   break;
+   case KVM_REG_PPC_IVOR11:
+   *val = get_reg_val(id, vcpu-arch.ivor[BOOKE_IRQPRIO_FIT]);
+   break;
+   case KVM_REG_PPC_IVOR12:
+   *val = get_reg_val(id,
+   vcpu-arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+   break;
+   case KVM_REG_PPC_IVOR13

[PATCH v3 1/5] KVM: PPC: Book3e: Increase FPU laziness

2014-08-05 Thread Mihai Caraman
Increase FPU laziness by calling kvmppc_load_guest_fp() just before
returning to guest instead of each sched in. Without this improvement
an interrupt may also claim floating point, corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v3:
 - no changes

v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index b4c89fa..0c6f616 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -654,12 +688,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -683,8 +713,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1188,6 +1216,7 @@ out:
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543..e73d513 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 164bad2..67c06eb 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body

[PATCH v6 0/5] Read guest last instruction from kvmppc_get_last_inst()

2014-07-23 Thread Mihai Caraman
Read guest last instruction from kvmppc_get_last_inst() allowing the function
to fail in order to emulate again. On bookehv architecture search for
the physical address and kmap it, instead of using Load External PID (lwepx)
instruction. This fixes an infinite loop caused by lwepx's data TLB miss
exception handled in the host and the TODO for execute-but-not-read entries
and TLB eviction.

Mihai Caraman (5):
  KVM: PPC: e500mc: Revert add load inst fixup
  KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
  KVM: PPC: Book3s: Remove kvmppc_read_inst() function
  KVM: PPC: Allow kvmppc_get_last_inst() to fail
  KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

 arch/powerpc/include/asm/kvm_book3s.h|  26 ---
 arch/powerpc/include/asm/kvm_booke.h |   5 --
 arch/powerpc/include/asm/kvm_ppc.h   |  31 +
 arch/powerpc/include/asm/mmu-book3e.h|   9 ++-
 arch/powerpc/kvm/book3s.c|  17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  17 ++---
 arch/powerpc/kvm/book3s_paired_singles.c |  38 +++
 arch/powerpc/kvm/book3s_pr.c | 114 ---
 arch/powerpc/kvm/booke.c |  47 +
 arch/powerpc/kvm/bookehv_interrupts.S|  55 ++-
 arch/powerpc/kvm/e500_mmu_host.c |  98 ++
 arch/powerpc/kvm/emulate.c   |  18 +++--
 arch/powerpc/kvm/powerpc.c   |  11 ++-
 13 files changed, 314 insertions(+), 172 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v6 1/5] KVM: PPC: e500mc: Revert add load inst fixup

2014-07-23 Thread Mihai Caraman
The commit 1d628af7 add load inst fixup made an attempt to handle
failures generated by reading the guest current instruction. The fixup
code that was added works by chance hiding the real issue.

Load external pid (lwepx) instruction, used by KVM to read guest
instructions, is executed in a substituted guest translation context
(EPLC[EGS] = 1). In consequence lwepx's TLB error and data storage
interrupts need to be handled by KVM, even though these interrupts
are generated from host context (MSR[GS] = 0) where lwepx is executed.

Currently, KVM hooks only interrupts generated from guest context
(MSR[GS] = 1), doing minimal checks on the fast path to avoid host
performance degradation. As a result, the host kernel handles lwepx
faults searching the faulting guest data address (loaded in DEAR) in
its own Logical Partition ID (LPID) 0 context. In case a host translation
is found the execution returns to the lwepx instruction instead of the
fixup, the host ending up in an infinite loop.

Revert the commit add load inst fixup. lwepx issue will be addressed
in a subsequent patch without needing fixup code.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6-v2:
 - no change

 arch/powerpc/kvm/bookehv_interrupts.S | 26 +-
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S 
b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b8..6ff4480 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -29,7 +29,6 @@
 #include asm/asm-compat.h
 #include asm/asm-offsets.h
 #include asm/bitsperlong.h
-#include asm/thread_info.h
 
 #ifdef CONFIG_64BIT
 #include asm/exception-64e.h
@@ -164,32 +163,9 @@
PPC_STL r30, VCPU_GPR(R30)(r4)
PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr   SPRN_EPLC, r8
-
-   /* disable preemption, so we are sure we hit the fixup handler */
-   CURRENT_THREAD_INFO(r8, r1)
-   li  r7, 1
-   stw r7, TI_PREEMPT(r8)
-
isync
-
-   /*
-* In case the read goes wrong, we catch it and write an invalid value
-* in LAST_INST instead.
-*/
-1: lwepx   r9, 0, r5
-2:
-.section .fixup, ax
-3: li  r9, KVM_INST_FETCH_FAILED
-   b   2b
-.previous
-.section __ex_table,a
-   PPC_LONG_ALIGN
-   PPC_LONG 1b,3b
-.previous
-
+   lwepx   r9, 0, r5
mtspr   SPRN_EPLC, r3
-   li  r7, 0
-   stw r7, TI_PREEMPT(r8)
stw r9, VCPU_LAST_INST(r4)
.endif
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v6 2/5] KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1

2014-07-23 Thread Mihai Caraman
Add missing defines MAS0_GET_TLBSEL() and MAS1_GET_TSIZE() for Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6-v2:
 - no change

 arch/powerpc/include/asm/mmu-book3e.h | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 8d24f78..cd4f04a 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,9 +40,11 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL_MASK0x3000
-#define MAS0_TLBSEL_SHIFT   28
-#define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_TLBSEL_MASK   0x3000
+#define MAS0_TLBSEL_SHIFT  28
+#define MAS0_TLBSEL(x) (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)  (((mas0)  MAS0_TLBSEL_MASK)  \
+   MAS0_TLBSEL_SHIFT)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -60,6 +62,7 @@
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
 #define MAS1_TSIZE(x)  (((x)  MAS1_TSIZE_SHIFT)  MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)   (((mas1)  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT)
 
 #define MAS2_EPN   (~0xFFFUL)
 #define MAS2_X00x0040
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v6 5/5] KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

2014-07-23 Thread Mihai Caraman
On book3e, KVM uses load external pid (lwepx) dedicated instruction to read
guest last instruction on the exit path. lwepx exceptions (DTLB_MISS, DSI
and LRAT), generated by loading a guest address, needs to be handled by KVM.
These exceptions are generated in a substituted guest translation context
(EPLC[EGS] = 1) from host context (MSR[GS] = 0).

Currently, KVM hooks only interrupts generated from guest context (MSR[GS] = 1),
doing minimal checks on the fast path to avoid host performance degradation.
lwepx exceptions originate from host state (MSR[GS] = 0) which implies
additional checks in DO_KVM macro (beside the current MSR[GS] = 1) by looking
at the Exception Syndrome Register (ESR[EPID]) and the External PID Load Context
Register (EPLC[EGS]). Doing this on each Data TLB miss exception is obviously
too intrusive for the host.

Read guest last instruction from kvmppc_load_last_inst() by searching for the
physical address and kmap it. This address the TODO for TLB eviction and
execute-but-not-read entries, and allow us to get rid of lwepx until we are
able to handle failures.

A simple stress benchmark shows a 1% sys performance degradation compared with
previous approach (lwepx without failure handling):

time for i in `seq 1 1`; do /bin/echo  /dev/null; done

real0m 8.85s
user0m 4.34s
sys 0m 4.48s

vs

real0m 8.84s
user0m 4.36s
sys 0m 4.44s

A solution to use lwepx and to handle its exceptions in KVM would be to 
temporary
highjack the interrupt vector from host. This imposes additional 
synchronizations
for cores like FSL e6500 that shares host IVOR registers between hardware 
threads.
This optimized solution can be later developed on top of this patch.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6:
 - no change

v5:
 - return EMULATE_AGAIN in case of failure

v4:
 - add switch and new function when getting last inst earlier
 - use enum instead of prev semantic
 - get rid of mas0, optimize mas7_mas3
 - give more context in visible messages
 - check storage attributes mismatch on MMUv2
 - get rid of pfn_valid check

v3:
 - reworked patch description
 - use unaltered kmap addr for kunmap
 - get last instruction before being preempted

v2:
 - reworked patch description
 - used pr_* functions
 - addressed cosmetic feedback

 arch/powerpc/kvm/booke.c  | 44 +
 arch/powerpc/kvm/bookehv_interrupts.S | 37 --
 arch/powerpc/kvm/e500_mmu_host.c  | 92 +++
 3 files changed, 145 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 34a42b9..843077b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -869,6 +869,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ enum emulation_result emulated, u32 last_inst)
+{
+   switch (emulated) {
+   case EMULATE_AGAIN:
+   return RESUME_GUEST;
+
+   case EMULATE_FAIL:
+   pr_debug(%s: load instruction from guest address %lx failed\n,
+  __func__, vcpu-arch.pc);
+   /* For debugging, encode the failing instruction and
+* report it to userspace. */
+   run-hw.hardware_exit_reason = ~0ULL  32;
+   run-hw.hardware_exit_reason |= last_inst;
+   kvmppc_core_queue_program(vcpu, ESR_PIL);
+   return RESUME_HOST;
+
+   default:
+   BUG();
+   }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -880,6 +902,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
int r = RESUME_HOST;
int s;
int idx;
+   u32 last_inst = KVM_INST_FETCH_FAILED;
+   enum emulation_result emulated = EMULATE_DONE;
 
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -887,6 +911,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
 
+   /*
+* get last instruction before beeing preempted
+* TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR  ESR_DATA
+*/
+   switch (exit_nr) {
+   case BOOKE_INTERRUPT_DATA_STORAGE:
+   case BOOKE_INTERRUPT_DTLB_MISS:
+   case BOOKE_INTERRUPT_HV_PRIV:
+   emulated = kvmppc_get_last_inst(vcpu, false, last_inst);
+   break;
+   default:
+   break;
+   }
+
local_irq_enable();
 
trace_kvm_exit(exit_nr, vcpu);
@@ -895,6 +933,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-exit_reason = KVM_EXIT_UNKNOWN;
run-ready_for_interrupt_injection = 1;
 
+   if (emulated != EMULATE_DONE) {
+   r = kvmppc_resume_inst_load(run

[PATCH v6 3/5] KVM: PPC: Book3s: Remove kvmppc_read_inst() function

2014-07-23 Thread Mihai Caraman
In the context of replacing kvmppc_ld() function calls with a version of
kvmppc_get_last_inst() which allow to fail, Alex Graf suggested this:

If we get EMULATE_AGAIN, we just have to make sure we go back into the guest.
No need to inject an ISI into  the guest - it'll do that all by itself.
With an error returning kvmppc_get_last_inst we can just completely
get rid of kvmppc_read_inst() and only use kvmppc_get_last_inst() instead.

As a intermediate step get rid of kvmppc_read_inst() and only use kvmppc_ld()
instead.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6:
 - add proper comments for VSX interrupt handling

v5:
 - make paired single emulation the unusual

v4:
 - new patch

 arch/powerpc/kvm/book3s_pr.c | 85 ++--
 1 file changed, 34 insertions(+), 51 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e40765f..e76aec3 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -710,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong 
fac)
 #endif
 }
 
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
-   ulong srr0 = kvmppc_get_pc(vcpu);
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
-   int ret;
-
-   ret = kvmppc_ld(vcpu, srr0, sizeof(u32), last_inst, false);
-   if (ret == -ENOENT) {
-   ulong msr = kvmppc_get_msr(vcpu);
-
-   msr = kvmppc_set_field(msr, 33, 33, 1);
-   msr = kvmppc_set_field(msr, 34, 36, 0);
-   msr = kvmppc_set_field(msr, 42, 47, 0);
-   kvmppc_set_msr_fast(vcpu, msr);
-   kvmppc_book3s_queue_irqprio(vcpu, 
BOOK3S_INTERRUPT_INST_STORAGE);
-   return EMULATE_AGAIN;
-   }
-
-   return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
-   /* Need to do paired single emulation? */
-   if (!(vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE))
-   return EMULATE_DONE;
-
-   /* Read out the instruction */
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
-   /* Need to emulate */
-   return EMULATE_FAIL;
-
-   return EMULATE_AGAIN;
-}
-
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 ulong msr)
@@ -1149,31 +1113,49 @@ program_interrupt:
case BOOK3S_INTERRUPT_VSX:
{
int ext_msr = 0;
+   int emul;
+   ulong pc;
+   u32 last_inst;
+
+   if (vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE) {
+   /* Do paired single instruction emulation */
+   pc = kvmppc_get_pc(vcpu);
+   last_inst = kvmppc_get_last_inst(vcpu);
+   emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst,
+false);
+   if (emul == EMULATE_DONE)
+   goto program_interrupt;
+   else
+   r = RESUME_GUEST;
 
-   switch (exit_nr) {
-   case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
-   case BOOK3S_INTERRUPT_ALTIVEC:ext_msr = MSR_VEC; break;
-   case BOOK3S_INTERRUPT_VSX:ext_msr = MSR_VSX; break;
+   break;
}
 
-   switch (kvmppc_check_ext(vcpu, exit_nr)) {
-   case EMULATE_DONE:
-   /* everything ok - let's enable the ext */
-   r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+   /* Enable external provider */
+   switch (exit_nr) {
+   case BOOK3S_INTERRUPT_FP_UNAVAIL:
+   ext_msr = MSR_FP;
break;
-   case EMULATE_FAIL:
-   /* we need to emulate this instruction */
-   goto program_interrupt;
+
+   case BOOK3S_INTERRUPT_ALTIVEC:
+   ext_msr = MSR_VEC;
break;
-   default:
-   /* nothing to worry about - go again */
+
+   case BOOK3S_INTERRUPT_VSX:
+   ext_msr = MSR_VSX;
break;
}
+
+   r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
break;
}
case BOOK3S_INTERRUPT_ALIGNMENT:
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   {
+   ulong pc = kvmppc_get_pc(vcpu);
+   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   int emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst, false);
+
+   if (emul == EMULATE_DONE) {
u32 dsisr

[PATCH v6 4/5] KVM: PPC: Allow kvmppc_get_last_inst() to fail

2014-07-23 Thread Mihai Caraman
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6:
 - rewrite kvmppc_get_last_inst() swap code to be understood at a glimpse :)
 - use inst in kvmppc_load_last_inst
 - these changes compile on book3s, please validate the functionality and
   do the necessary changes!

v5:
 - don't swap when load fail
 - convert the return value space of kvmppc_ld()

v4:
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h| 26 --
 arch/powerpc/include/asm/kvm_booke.h |  5 
 arch/powerpc/include/asm/kvm_ppc.h   | 31 ++
 arch/powerpc/kvm/book3s.c| 17 
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 17 
 arch/powerpc/kvm/book3s_paired_singles.c | 38 +--
 arch/powerpc/kvm/book3s_pr.c | 45 +++-
 arch/powerpc/kvm/booke.c |  3 +++
 arch/powerpc/kvm/e500_mmu_host.c |  6 +
 arch/powerpc/kvm/emulate.c   | 18 -
 arch/powerpc/kvm/powerpc.c   | 11 ++--
 11 files changed, 140 insertions(+), 77 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return (kvmppc_get_msr(vcpu)  MSR_LE) != (MSR_KERNEL  MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong 
pc)
-{
-   /* Load the instruction manually if it failed to do so in the
-* exit path */
-   if (vcpu-arch.last_inst == KVM_INST_FETCH_FAILED)
-   kvmppc_ld(vcpu, pc, sizeof(u32), vcpu-arch.last_inst, false);
-
-   return kvmppc_need_byteswap(vcpu) ? swab32(vcpu-arch.last_inst) :
-   vcpu-arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return vcpu-arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
vcpu-arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..2da5f547 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
EMULATE_EXIT_USER,/* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+   INST_GENERIC,
+   INST_SC,/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
   u64 val, unsigned int bytes,
   int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run

[PATCH v6 0/5] Read guest last instruction from kvmppc_get_last_inst()

2014-07-23 Thread Mihai Caraman
Read guest last instruction from kvmppc_get_last_inst() allowing the function
to fail in order to emulate again. On bookehv architecture search for
the physical address and kmap it, instead of using Load External PID (lwepx)
instruction. This fixes an infinite loop caused by lwepx's data TLB miss
exception handled in the host and the TODO for execute-but-not-read entries
and TLB eviction.

Mihai Caraman (5):
  KVM: PPC: e500mc: Revert add load inst fixup
  KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
  KVM: PPC: Book3s: Remove kvmppc_read_inst() function
  KVM: PPC: Allow kvmppc_get_last_inst() to fail
  KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

 arch/powerpc/include/asm/kvm_book3s.h|  26 ---
 arch/powerpc/include/asm/kvm_booke.h |   5 --
 arch/powerpc/include/asm/kvm_ppc.h   |  31 +
 arch/powerpc/include/asm/mmu-book3e.h|   9 ++-
 arch/powerpc/kvm/book3s.c|  17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  17 ++---
 arch/powerpc/kvm/book3s_paired_singles.c |  38 +++
 arch/powerpc/kvm/book3s_pr.c | 114 ---
 arch/powerpc/kvm/booke.c |  47 +
 arch/powerpc/kvm/bookehv_interrupts.S|  55 ++-
 arch/powerpc/kvm/e500_mmu_host.c |  98 ++
 arch/powerpc/kvm/emulate.c   |  18 +++--
 arch/powerpc/kvm/powerpc.c   |  11 ++-
 13 files changed, 314 insertions(+), 172 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v6 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail

2014-07-23 Thread Mihai Caraman
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing kvmppc_get_last_inst() function to fail.
Architecture specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6:
 - rewrite kvmppc_get_last_inst() swap code to be understood at a glimpse :)
 - use inst in kvmppc_load_last_inst
 - these changes compile on book3s, please validate the functionality and
   do the necessary changes!

v5:
 - don't swap when load fail
 - convert the return value space of kvmppc_ld()

v4:
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h| 26 --
 arch/powerpc/include/asm/kvm_booke.h |  5 
 arch/powerpc/include/asm/kvm_ppc.h   | 31 ++
 arch/powerpc/kvm/book3s.c| 17 
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 17 
 arch/powerpc/kvm/book3s_paired_singles.c | 38 +--
 arch/powerpc/kvm/book3s_pr.c | 45 +++-
 arch/powerpc/kvm/booke.c |  3 +++
 arch/powerpc/kvm/e500_mmu_host.c |  6 +
 arch/powerpc/kvm/emulate.c   | 18 -
 arch/powerpc/kvm/powerpc.c   | 11 ++--
 11 files changed, 140 insertions(+), 77 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return (kvmppc_get_msr(vcpu)  MSR_LE) != (MSR_KERNEL  MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong 
pc)
-{
-   /* Load the instruction manually if it failed to do so in the
-* exit path */
-   if (vcpu-arch.last_inst == KVM_INST_FETCH_FAILED)
-   kvmppc_ld(vcpu, pc, sizeof(u32), vcpu-arch.last_inst, false);
-
-   return kvmppc_need_byteswap(vcpu) ? swab32(vcpu-arch.last_inst) :
-   vcpu-arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return vcpu-arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
vcpu-arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..2da5f547 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
EMULATE_EXIT_USER,/* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+   INST_GENERIC,
+   INST_SC,/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
   u64 val, unsigned int bytes,
   int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run

[PATCH v6 5/5] KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

2014-07-23 Thread Mihai Caraman
On book3e, KVM uses load external pid (lwepx) dedicated instruction to read
guest last instruction on the exit path. lwepx exceptions (DTLB_MISS, DSI
and LRAT), generated by loading a guest address, needs to be handled by KVM.
These exceptions are generated in a substituted guest translation context
(EPLC[EGS] = 1) from host context (MSR[GS] = 0).

Currently, KVM hooks only interrupts generated from guest context (MSR[GS] = 1),
doing minimal checks on the fast path to avoid host performance degradation.
lwepx exceptions originate from host state (MSR[GS] = 0) which implies
additional checks in DO_KVM macro (beside the current MSR[GS] = 1) by looking
at the Exception Syndrome Register (ESR[EPID]) and the External PID Load Context
Register (EPLC[EGS]). Doing this on each Data TLB miss exception is obviously
too intrusive for the host.

Read guest last instruction from kvmppc_load_last_inst() by searching for the
physical address and kmap it. This addresses the TODO for TLB eviction and
execute-but-not-read entries, and allows us to get rid of lwepx until we are
able to handle failures.

A simple stress benchmark shows a 1% sys performance degradation compared with
previous approach (lwepx without failure handling):

time for i in `seq 1 1`; do /bin/echo  /dev/null; done

real0m 8.85s
user0m 4.34s
sys 0m 4.48s

vs

real0m 8.84s
user0m 4.36s
sys 0m 4.44s

A solution to use lwepx and to handle its exceptions in KVM would be to 
temporary
highjack the interrupt vector from host. This imposes additional 
synchronizations
for cores like FSL e6500 that shares host IVOR registers between hardware 
threads.
This optimized solution can be later developed on top of this patch.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v6:
 - no change

v5:
 - return EMULATE_AGAIN in case of failure

v4:
 - add switch and new function when getting last inst earlier
 - use enum instead of prev semantic
 - get rid of mas0, optimize mas7_mas3
 - give more context in visible messages
 - check storage attributes mismatch on MMUv2
 - get rid of pfn_valid check

v3:
 - reworked patch description
 - use unaltered kmap addr for kunmap
 - get last instruction before being preempted

v2:
 - reworked patch description
 - used pr_* functions
 - addressed cosmetic feedback

 arch/powerpc/kvm/booke.c  | 44 +
 arch/powerpc/kvm/bookehv_interrupts.S | 37 --
 arch/powerpc/kvm/e500_mmu_host.c  | 92 +++
 3 files changed, 145 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 34a42b9..843077b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -869,6 +869,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ enum emulation_result emulated, u32 last_inst)
+{
+   switch (emulated) {
+   case EMULATE_AGAIN:
+   return RESUME_GUEST;
+
+   case EMULATE_FAIL:
+   pr_debug(%s: load instruction from guest address %lx failed\n,
+  __func__, vcpu-arch.pc);
+   /* For debugging, encode the failing instruction and
+* report it to userspace. */
+   run-hw.hardware_exit_reason = ~0ULL  32;
+   run-hw.hardware_exit_reason |= last_inst;
+   kvmppc_core_queue_program(vcpu, ESR_PIL);
+   return RESUME_HOST;
+
+   default:
+   BUG();
+   }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -880,6 +902,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
int r = RESUME_HOST;
int s;
int idx;
+   u32 last_inst = KVM_INST_FETCH_FAILED;
+   enum emulation_result emulated = EMULATE_DONE;
 
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -887,6 +911,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
 
+   /*
+* get last instruction before beeing preempted
+* TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR  ESR_DATA
+*/
+   switch (exit_nr) {
+   case BOOKE_INTERRUPT_DATA_STORAGE:
+   case BOOKE_INTERRUPT_DTLB_MISS:
+   case BOOKE_INTERRUPT_HV_PRIV:
+   emulated = kvmppc_get_last_inst(vcpu, false, last_inst);
+   break;
+   default:
+   break;
+   }
+
local_irq_enable();
 
trace_kvm_exit(exit_nr, vcpu);
@@ -895,6 +933,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-exit_reason = KVM_EXIT_UNKNOWN;
run-ready_for_interrupt_injection = 1;
 
+   if (emulated != EMULATE_DONE) {
+   r = kvmppc_resume_inst_load(run

[PATCH v5 1/5] KVM: PPC: e500mc: Revert add load inst fixup

2014-07-17 Thread Mihai Caraman
The commit 1d628af7 "add load inst fixup" made an attempt to handle
failures generated by reading the guest current instruction. The fixup
code that was added works by chance, hiding the real issue.

Load external pid (lwepx) instruction, used by KVM to read guest
instructions, is executed in a substituted guest translation context
(EPLC[EGS] = 1). In consequence lwepx's TLB error and data storage
interrupts need to be handled by KVM, even though these interrupts
are generated from host context (MSR[GS] = 0) where lwepx is executed.

Currently, KVM hooks only interrupts generated from guest context
(MSR[GS] = 1), doing minimal checks on the fast path to avoid host
performance degradation. As a result, the host kernel handles lwepx
faults searching the faulting guest data address (loaded in DEAR) in
its own Logical Partition ID (LPID) 0 context. In case a host translation
is found the execution returns to the lwepx instruction instead of the
fixup, the host ending up in an infinite loop.

Revert the commit "add load inst fixup". lwepx issue will be addressed
in a subsequent patch without needing fixup code.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5-v2:
 - no change

 arch/powerpc/kvm/bookehv_interrupts.S | 26 +-
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S 
b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b8..6ff4480 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -29,7 +29,6 @@
 #include asm/asm-compat.h
 #include asm/asm-offsets.h
 #include asm/bitsperlong.h
-#include asm/thread_info.h
 
 #ifdef CONFIG_64BIT
 #include asm/exception-64e.h
@@ -164,32 +163,9 @@
PPC_STL r30, VCPU_GPR(R30)(r4)
PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr   SPRN_EPLC, r8
-
-   /* disable preemption, so we are sure we hit the fixup handler */
-   CURRENT_THREAD_INFO(r8, r1)
-   li  r7, 1
-   stw r7, TI_PREEMPT(r8)
-
isync
-
-   /*
-* In case the read goes wrong, we catch it and write an invalid value
-* in LAST_INST instead.
-*/
-1: lwepx   r9, 0, r5
-2:
-.section .fixup, ax
-3: li  r9, KVM_INST_FETCH_FAILED
-   b   2b
-.previous
-.section __ex_table,a
-   PPC_LONG_ALIGN
-   PPC_LONG 1b,3b
-.previous
-
+   lwepx   r9, 0, r5
mtspr   SPRN_EPLC, r3
-   li  r7, 0
-   stw r7, TI_PREEMPT(r8)
stw r9, VCPU_LAST_INST(r4)
.endif
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 5/5] KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

2014-07-17 Thread Mihai Caraman
On book3e, KVM uses load external pid (lwepx) dedicated instruction to read
guest last instruction on the exit path. lwepx exceptions (DTLB_MISS, DSI
and LRAT), generated by loading a guest address, needs to be handled by KVM.
These exceptions are generated in a substituted guest translation context
(EPLC[EGS] = 1) from host context (MSR[GS] = 0).

Currently, KVM hooks only interrupts generated from guest context (MSR[GS] = 1),
doing minimal checks on the fast path to avoid host performance degradation.
lwepx exceptions originate from host state (MSR[GS] = 0) which implies
additional checks in DO_KVM macro (beside the current MSR[GS] = 1) by looking
at the Exception Syndrome Register (ESR[EPID]) and the External PID Load Context
Register (EPLC[EGS]). Doing this on each Data TLB miss exception is obviously
too intrusive for the host.

Read guest last instruction from kvmppc_load_last_inst() by searching for the
physical address and kmap it. This addresses the TODO for TLB eviction and
execute-but-not-read entries, and allows us to get rid of lwepx until we are
able to handle failures.

A simple stress benchmark shows a 1% sys performance degradation compared with
previous approach (lwepx without failure handling):

time for i in `seq 1 1`; do /bin/echo  /dev/null; done

real0m 8.85s
user0m 4.34s
sys 0m 4.48s

vs

real0m 8.84s
user0m 4.36s
sys 0m 4.44s

A solution to use lwepx and to handle its exceptions in KVM would be to 
temporary
highjack the interrupt vector from host. This imposes additional 
synchronizations
for cores like FSL e6500 that shares host IVOR registers between hardware 
threads.
This optimized solution can be later developed on top of this patch.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5:
 - return EMULATE_AGAIN in case of failure

v4:
 - add switch and new function when getting last inst earlier
 - use enum instead of prev semantic
 - get rid of mas0, optimize mas7_mas3
 - give more context in visible messages
 - check storage attributes mismatch on MMUv2
 - get rid of pfn_valid check

v3:
 - reworked patch description
 - use unaltered kmap addr for kunmap
 - get last instruction before being preempted

v2:
 - reworked patch description
 - used pr_* functions
 - addressed cosmetic feedback

 arch/powerpc/kvm/booke.c  | 44 +
 arch/powerpc/kvm/bookehv_interrupts.S | 37 --
 arch/powerpc/kvm/e500_mmu_host.c  | 92 +++
 3 files changed, 145 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 34a42b9..843077b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -869,6 +869,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ enum emulation_result emulated, u32 last_inst)
+{
+   switch (emulated) {
+   case EMULATE_AGAIN:
+   return RESUME_GUEST;
+
+   case EMULATE_FAIL:
+   pr_debug(%s: load instruction from guest address %lx failed\n,
+  __func__, vcpu-arch.pc);
+   /* For debugging, encode the failing instruction and
+* report it to userspace. */
+   run-hw.hardware_exit_reason = ~0ULL  32;
+   run-hw.hardware_exit_reason |= last_inst;
+   kvmppc_core_queue_program(vcpu, ESR_PIL);
+   return RESUME_HOST;
+
+   default:
+   BUG();
+   }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -880,6 +902,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
int r = RESUME_HOST;
int s;
int idx;
+   u32 last_inst = KVM_INST_FETCH_FAILED;
+   enum emulation_result emulated = EMULATE_DONE;
 
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -887,6 +911,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
 
+   /*
+* get last instruction before beeing preempted
+* TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR  ESR_DATA
+*/
+   switch (exit_nr) {
+   case BOOKE_INTERRUPT_DATA_STORAGE:
+   case BOOKE_INTERRUPT_DTLB_MISS:
+   case BOOKE_INTERRUPT_HV_PRIV:
+   emulated = kvmppc_get_last_inst(vcpu, false, last_inst);
+   break;
+   default:
+   break;
+   }
+
local_irq_enable();
 
trace_kvm_exit(exit_nr, vcpu);
@@ -895,6 +933,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-exit_reason = KVM_EXIT_UNKNOWN;
run-ready_for_interrupt_injection = 1;
 
+   if (emulated != EMULATE_DONE) {
+   r = kvmppc_resume_inst_load(run, vcpu, emulated

[PATCH v5 0/5] Read guest last instruction from kvmppc_get_last_inst()

2014-07-17 Thread Mihai Caraman
Read guest last instruction from kvmppc_get_last_inst() allowing the function
to fail in order to emulate again. On bookehv architecture search for
the physical address and kmap it, instead of using Load External PID (lwepx)
instruction. This fixes an infinite loop caused by lwepx's data TLB miss
exception handled in the host and the TODO for execute-but-not-read entries
and TLB eviction.

Mihai Caraman (5):
  KVM: PPC: e500mc: Revert add load inst fixup
  KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
  KVM: PPC: Book3s: Remove kvmppc_read_inst() function
  KVM: PPC: Allow kvmppc_get_last_inst() to fail
  KVM: PPC: Bookehv: Get vcpu's last instruction for  emulation

 arch/powerpc/include/asm/kvm_book3s.h|  26 ---
 arch/powerpc/include/asm/kvm_booke.h |   5 --
 arch/powerpc/include/asm/kvm_ppc.h   |  25 +++
 arch/powerpc/include/asm/mmu-book3e.h|   9 ++-
 arch/powerpc/kvm/book3s.c|  17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  17 ++---
 arch/powerpc/kvm/book3s_paired_singles.c |  38 +++
 arch/powerpc/kvm/book3s_pr.c | 114 ---
 arch/powerpc/kvm/booke.c |  47 +
 arch/powerpc/kvm/bookehv_interrupts.S|  55 ++-
 arch/powerpc/kvm/e500_mmu_host.c |  98 ++
 arch/powerpc/kvm/emulate.c   |  18 +++--
 arch/powerpc/kvm/powerpc.c   |  11 ++-
 13 files changed, 309 insertions(+), 171 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 3/5] KVM: PPC: Book3s: Remove kvmppc_read_inst() function

2014-07-17 Thread Mihai Caraman
In the context of replacing kvmppc_ld() function calls with a version of
kvmppc_get_last_inst() which is allowed to fail, Alex Graf suggested this:

If we get EMULATE_AGAIN, we just have to make sure we go back into the guest.
No need to inject an ISI into  the guest - it'll do that all by itself.
With an error returning kvmppc_get_last_inst we can just use completely
get rid of kvmppc_read_inst() and only use kvmppc_get_last_inst() instead.

As an intermediate step get rid of kvmppc_read_inst() and only use kvmppc_ld()
instead.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5:
 - make paired single emulation the unusual

v4:
 - new patch

 arch/powerpc/kvm/book3s_pr.c | 91 ++--
 1 file changed, 37 insertions(+), 54 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e40765f..02a983e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -710,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong 
fac)
 #endif
 }
 
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
-   ulong srr0 = kvmppc_get_pc(vcpu);
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
-   int ret;
-
-   ret = kvmppc_ld(vcpu, srr0, sizeof(u32), last_inst, false);
-   if (ret == -ENOENT) {
-   ulong msr = kvmppc_get_msr(vcpu);
-
-   msr = kvmppc_set_field(msr, 33, 33, 1);
-   msr = kvmppc_set_field(msr, 34, 36, 0);
-   msr = kvmppc_set_field(msr, 42, 47, 0);
-   kvmppc_set_msr_fast(vcpu, msr);
-   kvmppc_book3s_queue_irqprio(vcpu, 
BOOK3S_INTERRUPT_INST_STORAGE);
-   return EMULATE_AGAIN;
-   }
-
-   return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
-   /* Need to do paired single emulation? */
-   if (!(vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE))
-   return EMULATE_DONE;
-
-   /* Read out the instruction */
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
-   /* Need to emulate */
-   return EMULATE_FAIL;
-
-   return EMULATE_AGAIN;
-}
-
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 ulong msr)
@@ -1149,31 +1113,49 @@ program_interrupt:
case BOOK3S_INTERRUPT_VSX:
{
int ext_msr = 0;
+   int emul;
+   ulong pc;
+   u32 last_inst;
+
+   if (vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE) {
+   /* Emulate the instruction */
+
+   pc = kvmppc_get_pc(vcpu);
+   last_inst = kvmppc_get_last_inst(vcpu);
+   emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst,
+false);
+   if (emul == EMULATE_DONE)
+   goto program_interrupt;
+   else
+   r = RESUME_GUEST;
+   } else {
+   /* Do paired single emulation */
 
-   switch (exit_nr) {
-   case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
-   case BOOK3S_INTERRUPT_ALTIVEC:ext_msr = MSR_VEC; break;
-   case BOOK3S_INTERRUPT_VSX:ext_msr = MSR_VSX; break;
-   }
+   switch (exit_nr) {
+   case BOOK3S_INTERRUPT_FP_UNAVAIL:
+   ext_msr = MSR_FP;
+   break;
+
+   case BOOK3S_INTERRUPT_ALTIVEC:
+   ext_msr = MSR_VEC;
+   break;
+
+   case BOOK3S_INTERRUPT_VSX:
+   ext_msr = MSR_VSX;
+   break;
+   }
 
-   switch (kvmppc_check_ext(vcpu, exit_nr)) {
-   case EMULATE_DONE:
-   /* everything ok - let's enable the ext */
r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
-   break;
-   case EMULATE_FAIL:
-   /* we need to emulate this instruction */
-   goto program_interrupt;
-   break;
-   default:
-   /* nothing to worry about - go again */
-   break;
}
break;
}
case BOOK3S_INTERRUPT_ALIGNMENT:
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   {
+   ulong pc = kvmppc_get_pc(vcpu);
+   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   int emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst, false);
+
+   if (emul

[PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail

2014-07-17 Thread Mihai Caraman
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing kvmppc_get_last_inst() function to fail.
Architecture specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5
 - don't swap when load fail
 - convert the return value space of kvmppc_ld()

v4:
 - these changes compile on book3s, please validate the functionality and
   do the necessary adaptations!
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h| 26 -
 arch/powerpc/include/asm/kvm_booke.h |  5 ---
 arch/powerpc/include/asm/kvm_ppc.h   | 25 +
 arch/powerpc/kvm/book3s.c| 17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 17 +++--
 arch/powerpc/kvm/book3s_paired_singles.c | 38 ---
 arch/powerpc/kvm/book3s_pr.c | 63 ++--
 arch/powerpc/kvm/booke.c |  3 ++
 arch/powerpc/kvm/e500_mmu_host.c |  6 +++
 arch/powerpc/kvm/emulate.c   | 18 ++---
 arch/powerpc/kvm/powerpc.c   | 11 +-
 11 files changed, 144 insertions(+), 85 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return (kvmppc_get_msr(vcpu)  MSR_LE) != (MSR_KERNEL  MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong 
pc)
-{
-   /* Load the instruction manually if it failed to do so in the
-* exit path */
-   if (vcpu-arch.last_inst == KVM_INST_FETCH_FAILED)
-   kvmppc_ld(vcpu, pc, sizeof(u32), vcpu-arch.last_inst, false);
-
-   return kvmppc_need_byteswap(vcpu) ? swab32(vcpu-arch.last_inst) :
-   vcpu-arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return vcpu-arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
vcpu-arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..7f9c634 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
EMULATE_EXIT_USER,/* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+   INST_GENERIC,
+   INST_SC,/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
   u64 val, unsigned int bytes,
   int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -234,6 +242,23 @@ struct

[PATCH v5 2/5] KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1

2014-07-17 Thread Mihai Caraman
Add missing defines MAS0_GET_TLBSEL() and MAS1_GET_TSIZE() for Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5-v2:
 - no change

 arch/powerpc/include/asm/mmu-book3e.h | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 8d24f78..cd4f04a 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,9 +40,11 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL_MASK0x3000
-#define MAS0_TLBSEL_SHIFT   28
-#define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_TLBSEL_MASK   0x3000
+#define MAS0_TLBSEL_SHIFT  28
+#define MAS0_TLBSEL(x) (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)  (((mas0)  MAS0_TLBSEL_MASK)  \
+   MAS0_TLBSEL_SHIFT)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -60,6 +62,7 @@
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
 #define MAS1_TSIZE(x)  (((x)  MAS1_TSIZE_SHIFT)  MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)   (((mas1)  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT)
 
 #define MAS2_EPN   (~0xFFFUL)
 #define MAS2_X00x0040
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 4/5] KVM: PPC: Allow kvmppc_get_last_inst() to fail

2014-07-17 Thread Mihai Caraman
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture specific implementations of kvmppc_load_last_inst() may read the
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5
 - don't swap when load fail
 - convert the return value space of kvmppc_ld()

v4:
 - these changes compile on book3s, please validate the functionality and
   do the necessary adaptations!
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h| 26 -
 arch/powerpc/include/asm/kvm_booke.h |  5 ---
 arch/powerpc/include/asm/kvm_ppc.h   | 25 +
 arch/powerpc/kvm/book3s.c| 17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 17 +++--
 arch/powerpc/kvm/book3s_paired_singles.c | 38 ---
 arch/powerpc/kvm/book3s_pr.c | 63 ++--
 arch/powerpc/kvm/booke.c |  3 ++
 arch/powerpc/kvm/e500_mmu_host.c |  6 +++
 arch/powerpc/kvm/emulate.c   | 18 ++---
 arch/powerpc/kvm/powerpc.c   | 11 +-
 11 files changed, 144 insertions(+), 85 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return (kvmppc_get_msr(vcpu)  MSR_LE) != (MSR_KERNEL  MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong 
pc)
-{
-   /* Load the instruction manually if it failed to do so in the
-* exit path */
-   if (vcpu-arch.last_inst == KVM_INST_FETCH_FAILED)
-   kvmppc_ld(vcpu, pc, sizeof(u32), vcpu-arch.last_inst, false);
-
-   return kvmppc_need_byteswap(vcpu) ? swab32(vcpu-arch.last_inst) :
-   vcpu-arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return vcpu-arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
vcpu-arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..7f9c634 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
EMULATE_EXIT_USER,/* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+   INST_GENERIC,
+   INST_SC,/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
   u64 val, unsigned int bytes,
   int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -234,6 +242,23 @@ struct

[PATCH v5 1/5] KVM: PPC: e500mc: Revert add load inst fixup

2014-07-17 Thread Mihai Caraman
The commit 1d628af7 add load inst fixup made an attempt to handle
failures generated by reading the guest current instruction. The fixup
code that was added works by chance hiding the real issue.

Load external pid (lwepx) instruction, used by KVM to read guest
instructions, is executed in a substituted guest translation context
(EPLC[EGS] = 1). In consequence lwepx's TLB error and data storage
interrupts need to be handled by KVM, even though these interrupts
are generated from host context (MSR[GS] = 0) where lwepx is executed.

Currently, KVM hooks only interrupts generated from guest context
(MSR[GS] = 1), doing minimal checks on the fast path to avoid host
performance degradation. As a result, the host kernel handles lwepx
faults searching the faulting guest data address (loaded in DEAR) in
its own Logical Partition ID (LPID) 0 context. In case a host translation
is found the execution returns to the lwepx instruction instead of the
fixup, the host ending up in an infinite loop.

Revert the commit add load inst fixup. lwepx issue will be addressed
in a subsequent patch without needing fixup code.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5-v2:
 - no change

 arch/powerpc/kvm/bookehv_interrupts.S | 26 +-
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S 
b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b8..6ff4480 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -29,7 +29,6 @@
 #include asm/asm-compat.h
 #include asm/asm-offsets.h
 #include asm/bitsperlong.h
-#include asm/thread_info.h
 
 #ifdef CONFIG_64BIT
 #include asm/exception-64e.h
@@ -164,32 +163,9 @@
PPC_STL r30, VCPU_GPR(R30)(r4)
PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr   SPRN_EPLC, r8
-
-   /* disable preemption, so we are sure we hit the fixup handler */
-   CURRENT_THREAD_INFO(r8, r1)
-   li  r7, 1
-   stw r7, TI_PREEMPT(r8)
-
isync
-
-   /*
-* In case the read goes wrong, we catch it and write an invalid value
-* in LAST_INST instead.
-*/
-1: lwepx   r9, 0, r5
-2:
-.section .fixup, ax
-3: li  r9, KVM_INST_FETCH_FAILED
-   b   2b
-.previous
-.section __ex_table,a
-   PPC_LONG_ALIGN
-   PPC_LONG 1b,3b
-.previous
-
+   lwepx   r9, 0, r5
mtspr   SPRN_EPLC, r3
-   li  r7, 0
-   stw r7, TI_PREEMPT(r8)
stw r9, VCPU_LAST_INST(r4)
.endif
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 3/5] KVM: PPC: Book3s: Remove kvmppc_read_inst() function

2014-07-17 Thread Mihai Caraman
In the context of replacing kvmppc_ld() function calls with a version of
kvmppc_get_last_inst() which allow to fail, Alex Graf suggested this:

If we get EMULATE_AGAIN, we just have to make sure we go back into the guest.
No need to inject an ISI into  the guest - it'll do that all by itself.
With an error-returning kvmppc_get_last_inst() we can just completely
get rid of kvmppc_read_inst() and only use kvmppc_get_last_inst() instead.

As an intermediate step, get rid of kvmppc_read_inst() and only use kvmppc_ld()
instead.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5:
 - make paired single emulation the unusual

v4:
 - new patch

 arch/powerpc/kvm/book3s_pr.c | 91 ++--
 1 file changed, 37 insertions(+), 54 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e40765f..02a983e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -710,42 +710,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong 
fac)
 #endif
 }
 
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
-   ulong srr0 = kvmppc_get_pc(vcpu);
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
-   int ret;
-
-   ret = kvmppc_ld(vcpu, srr0, sizeof(u32), last_inst, false);
-   if (ret == -ENOENT) {
-   ulong msr = kvmppc_get_msr(vcpu);
-
-   msr = kvmppc_set_field(msr, 33, 33, 1);
-   msr = kvmppc_set_field(msr, 34, 36, 0);
-   msr = kvmppc_set_field(msr, 42, 47, 0);
-   kvmppc_set_msr_fast(vcpu, msr);
-   kvmppc_book3s_queue_irqprio(vcpu, 
BOOK3S_INTERRUPT_INST_STORAGE);
-   return EMULATE_AGAIN;
-   }
-
-   return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
-   /* Need to do paired single emulation? */
-   if (!(vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE))
-   return EMULATE_DONE;
-
-   /* Read out the instruction */
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
-   /* Need to emulate */
-   return EMULATE_FAIL;
-
-   return EMULATE_AGAIN;
-}
-
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 ulong msr)
@@ -1149,31 +1113,49 @@ program_interrupt:
case BOOK3S_INTERRUPT_VSX:
{
int ext_msr = 0;
+   int emul;
+   ulong pc;
+   u32 last_inst;
+
+   if (vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE) {
+   /* Emulate the instruction */
+
+   pc = kvmppc_get_pc(vcpu);
+   last_inst = kvmppc_get_last_inst(vcpu);
+   emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst,
+false);
+   if (emul == EMULATE_DONE)
+   goto program_interrupt;
+   else
+   r = RESUME_GUEST;
+   } else {
+   /* Do paired single emulation */
 
-   switch (exit_nr) {
-   case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
-   case BOOK3S_INTERRUPT_ALTIVEC:ext_msr = MSR_VEC; break;
-   case BOOK3S_INTERRUPT_VSX:ext_msr = MSR_VSX; break;
-   }
+   switch (exit_nr) {
+   case BOOK3S_INTERRUPT_FP_UNAVAIL:
+   ext_msr = MSR_FP;
+   break;
+
+   case BOOK3S_INTERRUPT_ALTIVEC:
+   ext_msr = MSR_VEC;
+   break;
+
+   case BOOK3S_INTERRUPT_VSX:
+   ext_msr = MSR_VSX;
+   break;
+   }
 
-   switch (kvmppc_check_ext(vcpu, exit_nr)) {
-   case EMULATE_DONE:
-   /* everything ok - let's enable the ext */
r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
-   break;
-   case EMULATE_FAIL:
-   /* we need to emulate this instruction */
-   goto program_interrupt;
-   break;
-   default:
-   /* nothing to worry about - go again */
-   break;
}
break;
}
case BOOK3S_INTERRUPT_ALIGNMENT:
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   {
+   ulong pc = kvmppc_get_pc(vcpu);
+   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   int emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst, false);
+
+   if (emul

[PATCH v5 0/5] Read guest last instruction from kvmppc_get_last_inst()

2014-07-17 Thread Mihai Caraman
Read guest last instruction from kvmppc_get_last_inst() allowing the function
to fail in order to emulate again. On bookehv architecture search for
the physical address and kmap it, instead of using Load External PID (lwepx)
instruction. This fixes an infinite loop caused by lwepx's data TLB miss
exception handled in the host and the TODO for execute-but-not-read entries
and TLB eviction.

Mihai Caraman (5):
  KVM: PPC: e500mc: Revert add load inst fixup
  KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
  KVM: PPC: Book3s: Remove kvmppc_read_inst() function
  KVM: PPC: Allow kvmppc_get_last_inst() to fail
  KVM: PPC: Bookehv: Get vcpu's last instruction for  emulation

 arch/powerpc/include/asm/kvm_book3s.h|  26 ---
 arch/powerpc/include/asm/kvm_booke.h |   5 --
 arch/powerpc/include/asm/kvm_ppc.h   |  25 +++
 arch/powerpc/include/asm/mmu-book3e.h|   9 ++-
 arch/powerpc/kvm/book3s.c|  17 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  17 ++---
 arch/powerpc/kvm/book3s_paired_singles.c |  38 +++
 arch/powerpc/kvm/book3s_pr.c | 114 ---
 arch/powerpc/kvm/booke.c |  47 +
 arch/powerpc/kvm/bookehv_interrupts.S|  55 ++-
 arch/powerpc/kvm/e500_mmu_host.c |  98 ++
 arch/powerpc/kvm/emulate.c   |  18 +++--
 arch/powerpc/kvm/powerpc.c   |  11 ++-
 13 files changed, 309 insertions(+), 171 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v5 5/5] KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

2014-07-17 Thread Mihai Caraman
On book3e, KVM uses load external pid (lwepx) dedicated instruction to read
guest last instruction on the exit path. lwepx exceptions (DTLB_MISS, DSI
and LRAT), generated by loading a guest address, needs to be handled by KVM.
These exceptions are generated in a substituted guest translation context
(EPLC[EGS] = 1) from host context (MSR[GS] = 0).

Currently, KVM hooks only interrupts generated from guest context (MSR[GS] = 1),
doing minimal checks on the fast path to avoid host performance degradation.
lwepx exceptions originate from host state (MSR[GS] = 0) which implies
additional checks in DO_KVM macro (beside the current MSR[GS] = 1) by looking
at the Exception Syndrome Register (ESR[EPID]) and the External PID Load Context
Register (EPLC[EGS]). Doing this on each Data TLB miss exception is obviously
too intrusive for the host.

Read the guest last instruction from kvmppc_load_last_inst() by searching for
the physical address and kmapping it. This addresses the TODO for TLB eviction
and execute-but-not-read entries, and allows us to get rid of lwepx until we
are able to handle failures.

A simple stress benchmark shows a 1% sys performance degradation compared with
previous approach (lwepx without failure handling):

time for i in `seq 1 1`; do /bin/echo  /dev/null; done

real0m 8.85s
user0m 4.34s
sys 0m 4.48s

vs

real0m 8.84s
user0m 4.36s
sys 0m 4.44s

A solution to use lwepx and to handle its exceptions in KVM would be to
temporarily hijack the interrupt vector from the host. This imposes additional
synchronizations for cores like FSL e6500 that share host IVOR registers
between hardware threads.
This optimized solution can be later developed on top of this patch.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5:
 - return EMULATE_AGAIN in case of failure

v4:
 - add switch and new function when getting last inst earlier
 - use enum instead of prev semantic
 - get rid of mas0, optimize mas7_mas3
 - give more context in visible messages
 - check storage attributes mismatch on MMUv2
 - get rid of pfn_valid check

v3:
 - reworked patch description
 - use unaltered kmap addr for kunmap
 - get last instruction before being preempted

v2:
 - reworked patch description
 - used pr_* functions
 - addressed cosmetic feedback

 arch/powerpc/kvm/booke.c  | 44 +
 arch/powerpc/kvm/bookehv_interrupts.S | 37 --
 arch/powerpc/kvm/e500_mmu_host.c  | 92 +++
 3 files changed, 145 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 34a42b9..843077b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -869,6 +869,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ enum emulation_result emulated, u32 last_inst)
+{
+   switch (emulated) {
+   case EMULATE_AGAIN:
+   return RESUME_GUEST;
+
+   case EMULATE_FAIL:
+   pr_debug(%s: load instruction from guest address %lx failed\n,
+  __func__, vcpu-arch.pc);
+   /* For debugging, encode the failing instruction and
+* report it to userspace. */
+   run-hw.hardware_exit_reason = ~0ULL  32;
+   run-hw.hardware_exit_reason |= last_inst;
+   kvmppc_core_queue_program(vcpu, ESR_PIL);
+   return RESUME_HOST;
+
+   default:
+   BUG();
+   }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -880,6 +902,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
int r = RESUME_HOST;
int s;
int idx;
+   u32 last_inst = KVM_INST_FETCH_FAILED;
+   enum emulation_result emulated = EMULATE_DONE;
 
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -887,6 +911,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
 
+   /*
+* get last instruction before beeing preempted
+* TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR  ESR_DATA
+*/
+   switch (exit_nr) {
+   case BOOKE_INTERRUPT_DATA_STORAGE:
+   case BOOKE_INTERRUPT_DTLB_MISS:
+   case BOOKE_INTERRUPT_HV_PRIV:
+   emulated = kvmppc_get_last_inst(vcpu, false, last_inst);
+   break;
+   default:
+   break;
+   }
+
local_irq_enable();
 
trace_kvm_exit(exit_nr, vcpu);
@@ -895,6 +933,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-exit_reason = KVM_EXIT_UNKNOWN;
run-ready_for_interrupt_injection = 1;
 
+   if (emulated != EMULATE_DONE) {
+   r = kvmppc_resume_inst_load(run, vcpu, emulated

[PATCH v5 2/5] KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1

2014-07-17 Thread Mihai Caraman
Add missing defines MAS0_GET_TLBSEL() and MAS1_GET_TSIZE() for Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v5-v2:
 - no change

 arch/powerpc/include/asm/mmu-book3e.h | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 8d24f78..cd4f04a 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,9 +40,11 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL_MASK0x3000
-#define MAS0_TLBSEL_SHIFT   28
-#define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_TLBSEL_MASK   0x3000
+#define MAS0_TLBSEL_SHIFT  28
+#define MAS0_TLBSEL(x) (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)  (((mas0)  MAS0_TLBSEL_MASK)  \
+   MAS0_TLBSEL_SHIFT)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -60,6 +62,7 @@
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
 #define MAS1_TSIZE(x)  (((x)  MAS1_TSIZE_SHIFT)  MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)   (((mas1)  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT)
 
 #define MAS2_EPN   (~0xFFFUL)
 #define MAS2_X00x0040
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2] KVM: PPC: e500: Emulate power management control SPR

2014-07-04 Thread Mihai Caraman
For FSL e6500 core the kernel uses power management SPR register (PWRMGTCR0)
to enable idle power down for cores and devices by setting up the idle count
period at boot time. With the host already controlling the power management
configuration the guest could simply benefit from it, so emulate guest request
as a general store.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - treat the operation as a general store

 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/e500_emulate.c | 12 
 2 files changed, 13 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 62b2cee..faf2f0e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -584,6 +584,7 @@ struct kvm_vcpu_arch {
u32 mmucfg;
u32 eptcfg;
u32 epr;
+   u32 pwrmgtcr0;
u32 crit_save;
/* guest debug registers*/
struct debug_reg dbg_reg;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d517..c99c40e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong spr_va
spr_val);
break;
 
+   case SPRN_PWRMGTCR0:
+   /*
+* Guest relies on host power management configurations
+* Treat the request as a general store
+*/
+   vcpu-arch.pwrmgtcr0 = spr_val;
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong *spr_v
*spr_val = vcpu-arch.eptcfg;
break;
 
+   case SPRN_PWRMGTCR0:
+   *spr_val = vcpu-arch.pwrmgtcr0;
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
*spr_val = vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2] KVM: PPC: e500: Emulate power management control SPR

2014-07-04 Thread Mihai Caraman
For FSL e6500 core the kernel uses power management SPR register (PWRMGTCR0)
to enable idle power down for cores and devices by setting up the idle count
period at boot time. With the host already controlling the power management
configuration the guest could simply benefit from it, so emulate guest request
as a general store.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - treat the operation as a general store

 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/e500_emulate.c | 12 
 2 files changed, 13 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 62b2cee..faf2f0e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -584,6 +584,7 @@ struct kvm_vcpu_arch {
u32 mmucfg;
u32 eptcfg;
u32 epr;
+   u32 pwrmgtcr0;
u32 crit_save;
/* guest debug registers*/
struct debug_reg dbg_reg;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d517..c99c40e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong spr_va
spr_val);
break;
 
+   case SPRN_PWRMGTCR0:
+   /*
+* Guest relies on host power management configurations
+* Treat the request as a general store
+*/
+   vcpu-arch.pwrmgtcr0 = spr_val;
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong *spr_v
*spr_val = vcpu-arch.eptcfg;
break;
 
+   case SPRN_PWRMGTCR0:
+   *spr_val = vcpu-arch.pwrmgtcr0;
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
*spr_val = vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 1/4] powerpc/booke64: Add LRAT next and max entries to tlb_core_data structure

2014-07-03 Thread Mihai Caraman
LRAT (Logical to Real Address Translation) is shared between hw threads.
Add LRAT next and max entries to tlb_core_data structure and initialize them.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h | 7 +++
 arch/powerpc/include/asm/reg_booke.h  | 1 +
 arch/powerpc/mm/fsl_booke_mmu.c   | 8 
 3 files changed, 16 insertions(+)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 8d24f78..088fd9f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -217,6 +217,12 @@
 #define TLBILX_T_CLASS26
 #define TLBILX_T_CLASS37
 
+/* LRATCFG bits */
+#define LRATCFG_ASSOC  0xFF00
+#define LRATCFG_LASIZE 0x00FE
+#define LRATCFG_LPID   0x2000
+#define LRATCFG_NENTRY 0x0FFF
+
 #ifndef __ASSEMBLY__
 #include asm/bug.h
 
@@ -294,6 +300,7 @@ struct tlb_core_data {
 
/* For software way selection, as on Freescale TLB1 */
u8 esel_next, esel_max, esel_first;
+   u8 lrat_next, lrat_max;
 };
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/reg_booke.h 
b/arch/powerpc/include/asm/reg_booke.h
index 464f108..75bda23 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -64,6 +64,7 @@
 #define SPRN_DVC2  0x13F   /* Data Value Compare Register 2 */
 #define SPRN_LPID  0x152   /* Logical Partition ID */
 #define SPRN_MAS8  0x155   /* MMU Assist Register 8 */
+#define SPRN_LRATCFG   0x156   /* LRAT Configuration Register */
 #define SPRN_TLB0PS0x158   /* TLB 0 Page Size Register */
 #define SPRN_TLB1PS0x159   /* TLB 1 Page Size Register */
 #define SPRN_MAS5_MAS6 0x15c   /* MMU Assist Register 5 || 6 */
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 94cd728..6492708 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -196,6 +196,14 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t 
phys, unsigned long virt,
get_paca()-tcd.esel_next = i;
get_paca()-tcd.esel_max = mfspr(SPRN_TLB1CFG)  TLBnCFG_N_ENTRY;
get_paca()-tcd.esel_first = i;
+
+   get_paca()-tcd.lrat_next = 0;
+   if (((mfspr(SPRN_MMUCFG)  MMUCFG_MAVN) == MMUCFG_MAVN_V2) 
+   (mfspr(SPRN_MMUCFG)  MMUCFG_LRAT)) {
+   get_paca()-tcd.lrat_max = mfspr(SPRN_LRATCFG)  LRATCFG_NENTRY;
+   } else {
+   get_paca()-tcd.lrat_max = 0;
+   }
 #endif
 
return amount_mapped;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 3/4] KVM: PPC: e500: TLB emulation for IND entries

2014-07-03 Thread Mihai Caraman
Handle indirect entries (IND) in TLB emulation code. The translation size of IND
entries differs from the size of the referred Page Tables (Linux guests now use
IND of 2MB for 4KB PTs) and this requires a careful tweak of the existing logic.

TLB search emulation requires an additional search in HW TLB0 (since these
entries are directly added by HTW) and found entries should be presented to the
guest with RPN changed from PFN to GFN. There might be more GFNs pointing to the
same PFN, so the only way to get the corresponding GFN is to search for it in
the guest's PTE. If an IND entry for the corresponding PT is not available,
just invalidate the guest's ea and report a tlbsx miss. This patch only
implements the invalidation and leaves a TODO note for searching HW TLB0.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h |  2 +
 arch/powerpc/kvm/e500.h   | 81 ---
 arch/powerpc/kvm/e500_mmu.c   | 78 +++--
 arch/powerpc/kvm/e500_mmu_host.c  | 31 --
 arch/powerpc/kvm/e500mc.c | 53 +--
 5 files changed, 211 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index ac6acf7..e482ad8 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -59,6 +59,7 @@
 #define MAS1_IPROT 0x4000
 #define MAS1_TID(x)(((x)  16)  0x3FFF)
 #define MAS1_IND   0x2000
+#define MAS1_IND_SHIFT 13
 #define MAS1_TS0x1000
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
@@ -94,6 +95,7 @@
 #define MAS4_TLBSEL_MASK   MAS0_TLBSEL_MASK
 #define MAS4_TLBSELD(x)MAS0_TLBSEL(x)
 #define MAS4_INDD  0x8000  /* Default IND */
+#define MAS4_INDD_SHIFT15
 #define MAS4_TSIZED(x) MAS1_TSIZE(x)
 #define MAS4_X0D   0x0040
 #define MAS4_X1D   0x0020
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178..70a556d 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -148,6 +148,22 @@ unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 
*vcpu_e500,
 unsigned int pr, int avoid_recursion);
 #endif
 
+static inline bool has_feature(const struct kvm_vcpu *vcpu,
+  enum vcpu_ftr ftr)
+{
+   bool has_ftr;
+
+   switch (ftr) {
+   case VCPU_FTR_MMU_V2:
+   has_ftr = ((vcpu-arch.mmucfg  MMUCFG_MAVN) == MMUCFG_MAVN_V2);
+   break;
+
+   default:
+   return false;
+   }
+   return has_ftr;
+}
+
 /* TLB helper functions */
 static inline unsigned int
 get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
@@ -207,6 +223,16 @@ get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
return (tlbe-mas1  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT;
 }
 
+static inline unsigned int
+get_tlb_ind(const struct kvm_vcpu *vcpu,
+   const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (tlbe-mas1  MAS1_IND)  MAS1_IND_SHIFT;
+
+   return 0;
+}
+
 static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.pid  0xff;
@@ -232,6 +258,30 @@ static inline unsigned int get_cur_sas(const struct 
kvm_vcpu *vcpu)
return vcpu-arch.shared-mas6  0x1;
 }
 
+static inline unsigned int get_cur_ind(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas1  MAS1_IND)  MAS1_IND_SHIFT;
+
+   return 0;
+}
+
+static inline unsigned int get_cur_indd(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas4  MAS4_INDD)  MAS4_INDD_SHIFT;
+
+   return 0;
+}
+
+static inline unsigned int get_cur_sind(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas6  MAS6_SIND)  MAS6_SIND_SHIFT;
+
+   return 0;
+}
+
 static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
 {
/*
@@ -286,6 +336,22 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 
*vcpu_e500,
 void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
 
 #ifdef CONFIG_KVM_BOOKE_HV
+void inval_tlb_on_host(struct kvm_vcpu *vcpu, int type, int pid);
+
+void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid, int sas,
+ int sind);
+#else
+/* TLB is fully virtualized */
+static inline void inval_tlb_on_host(struct kvm_vcpu *vcpu,
+int type, int pid)
+{}
+
+static inline void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid,
+   int sas, int sind)
+{}
+#endif
+
+#ifdef

[RFC PATCH 0/4] KVM Book3E support for HTW guests

2014-07-03 Thread Mihai Caraman
KVM Book3E support for Hardware Page Tablewalk enabled guests.

Mihai Caraman (4):
  powerpc/booke64: Add LRAT next and max entries to tlb_core_data
structure
  KVM: PPC: Book3E: Handle LRAT error exception
  KVM: PPC: e500: TLB emulation for IND entries
  KVM: PPC: e500mc: Advertise E.PT to support HTW guests

 arch/powerpc/include/asm/kvm_host.h   |   1 +
 arch/powerpc/include/asm/kvm_ppc.h|   2 +
 arch/powerpc/include/asm/mmu-book3e.h |  12 +++
 arch/powerpc/include/asm/reg_booke.h  |  14 +++
 arch/powerpc/kernel/asm-offsets.c |   1 +
 arch/powerpc/kvm/booke.c  |  40 +
 arch/powerpc/kvm/bookehv_interrupts.S |   9 +-
 arch/powerpc/kvm/e500.h   |  81 ++
 arch/powerpc/kvm/e500_mmu.c   |  84 ++
 arch/powerpc/kvm/e500_mmu_host.c  | 156 +-
 arch/powerpc/kvm/e500mc.c |  55 +++-
 arch/powerpc/mm/fsl_booke_mmu.c   |   8 ++
 12 files changed, 423 insertions(+), 40 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 4/4] KVM: PPC: e500mc: Advertise E.PT to support HTW guests

2014-07-03 Thread Mihai Caraman
Enable E.PT for vcpus with MMU MAV 2.0 to support Hardware Page Tablewalk (HTW)
in guests.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500_mmu.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index b775e6a..1de0cd6 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -945,11 +945,7 @@ static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
vcpu-arch.tlbps[1] = mfspr(SPRN_TLB1PS);
 
vcpu-arch.mmucfg = ~MMUCFG_LRAT;
-
-   /* Guest mmu emulation currently doesn't handle E.PT */
-   vcpu-arch.eptcfg = 0;
-   vcpu-arch.tlbcfg[0] = ~TLBnCFG_PT;
-   vcpu-arch.tlbcfg[1] = ~TLBnCFG_IND;
+   vcpu-arch.eptcfg = mfspr(SPRN_EPTCFG);
}
 
return 0;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 2/4] KVM: PPC: Book3E: Handle LRAT error exception

2014-07-03 Thread Mihai Caraman
Handle the LRAT error exception with support for LRAT mapping and invalidation.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/kvm_host.h   |   1 +
 arch/powerpc/include/asm/kvm_ppc.h|   2 +
 arch/powerpc/include/asm/mmu-book3e.h |   3 +
 arch/powerpc/include/asm/reg_booke.h  |  13 
 arch/powerpc/kernel/asm-offsets.c |   1 +
 arch/powerpc/kvm/booke.c  |  40 +++
 arch/powerpc/kvm/bookehv_interrupts.S |   9 ++-
 arch/powerpc/kvm/e500_mmu_host.c  | 125 ++
 arch/powerpc/kvm/e500mc.c |   2 +
 9 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b..7b6b2ec 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -433,6 +433,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u64 fault_lper;
 #endif
 
 #if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd..2730a29 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -86,6 +86,8 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned 
int gtlb_index,
   gva_t eaddr);
 extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu);
 
 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
 unsigned int id);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 088fd9f..ac6acf7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,6 +40,8 @@
 
 /* MAS registers bit definitions */
 
+#define MAS0_ATSEL 0x8000
+#define MAS0_ATSEL_SHIFT   31
 #define MAS0_TLBSEL_MASK0x3000
 #define MAS0_TLBSEL_SHIFT   28
 #define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
@@ -53,6 +55,7 @@
 #define MAS0_WQ_CLR_RSRV   0x2000
 
 #define MAS1_VALID 0x8000
+#define MAS1_VALID_SHIFT   31
 #define MAS1_IPROT 0x4000
 #define MAS1_TID(x)(((x)  16)  0x3FFF)
 #define MAS1_IND   0x2000
diff --git a/arch/powerpc/include/asm/reg_booke.h 
b/arch/powerpc/include/asm/reg_booke.h
index 75bda23..783d617 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -43,6 +43,8 @@
 
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_DECAR 0x036   /* Decrementer Auto Reload Register */
+#define SPRN_LPER  0x038   /* Logical Page Exception Register */
+#define SPRN_LPERU 0x039   /* Logical Page Exception Register Upper */
 #define SPRN_IVPR  0x03F   /* Interrupt Vector Prefix Register */
 #define SPRN_USPRG00x100   /* User Special Purpose Register General 0 */
 #define SPRN_SPRG3R0x103   /* Special Purpose Register General 3 Read */
@@ -358,6 +360,9 @@
 #define ESR_ILK0x0010  /* Instr. Cache Locking */
 #define ESR_PUO0x0004  /* Unimplemented Operation 
exception */
 #define ESR_BO 0x0002  /* Byte Ordering */
+#define ESR_DATA   0x0400  /* Page Table Data Access */
+#define ESR_TLBI   0x0200  /* Page Table TLB Ineligible */
+#define ESR_PT 0x0100  /* Page Table Translation */
 #define ESR_SPV0x0080  /* Signal Processing operation 
*/
 
 /* Bit definitions related to the DBCR0. */
@@ -649,6 +654,14 @@
 #define EPC_EPID   0x3fff
 #define EPC_EPID_SHIFT 0
 
+/* Bit definitions for LPER */
+#define LPER_ALPN  0x000FF000ULL
+#define LPER_ALPN_SHIFT12
+#define LPER_WIMGE 0x0F80
+#define LPER_WIMGE_SHIFT   7
+#define LPER_LPS   0x000F
+#define LPER_LPS_SHIFT 0
+
 /*
  * The IBM-403 is an even more odd special case, as it is much
  * older than the IBM-405 series.  We put these down here incase someone
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index f5995a9..be6e329 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -713,6 +713,7 @@ int main(void)
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+   DEFINE(VCPU_FAULT_LPER, offsetof(struct kvm_vcpu, arch.fault_lper));
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a192975..ab1077f 100644

[RFC PATCH 0/4] KVM Book3E support for HTW guests

2014-07-03 Thread Mihai Caraman
KVM Book3E support for Hardware Page Tablewalk enabled guests.

Mihai Caraman (4):
  powerpc/booke64: Add LRAT next and max entries to tlb_core_data
structure
  KVM: PPC: Book3E: Handle LRAT error exception
  KVM: PPC: e500: TLB emulation for IND entries
  KVM: PPC: e500mc: Advertise E.PT to support HTW guests

 arch/powerpc/include/asm/kvm_host.h   |   1 +
 arch/powerpc/include/asm/kvm_ppc.h|   2 +
 arch/powerpc/include/asm/mmu-book3e.h |  12 +++
 arch/powerpc/include/asm/reg_booke.h  |  14 +++
 arch/powerpc/kernel/asm-offsets.c |   1 +
 arch/powerpc/kvm/booke.c  |  40 +
 arch/powerpc/kvm/bookehv_interrupts.S |   9 +-
 arch/powerpc/kvm/e500.h   |  81 ++
 arch/powerpc/kvm/e500_mmu.c   |  84 ++
 arch/powerpc/kvm/e500_mmu_host.c  | 156 +-
 arch/powerpc/kvm/e500mc.c |  55 +++-
 arch/powerpc/mm/fsl_booke_mmu.c   |   8 ++
 12 files changed, 423 insertions(+), 40 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 1/4] powerpc/booke64: Add LRAT next and max entries to tlb_core_data structure

2014-07-03 Thread Mihai Caraman
LRAT (Logical to Real Address Translation) is shared between hw threads.
Add LRAT next and max entries to tlb_core_data structure and initialize them.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h | 7 +++
 arch/powerpc/include/asm/reg_booke.h  | 1 +
 arch/powerpc/mm/fsl_booke_mmu.c   | 8 
 3 files changed, 16 insertions(+)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 8d24f78..088fd9f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -217,6 +217,12 @@
 #define TLBILX_T_CLASS26
 #define TLBILX_T_CLASS37
 
+/* LRATCFG bits */
+#define LRATCFG_ASSOC  0xFF00
+#define LRATCFG_LASIZE 0x00FE
+#define LRATCFG_LPID   0x2000
+#define LRATCFG_NENTRY 0x0FFF
+
 #ifndef __ASSEMBLY__
 #include asm/bug.h
 
@@ -294,6 +300,7 @@ struct tlb_core_data {
 
/* For software way selection, as on Freescale TLB1 */
u8 esel_next, esel_max, esel_first;
+   u8 lrat_next, lrat_max;
 };
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/reg_booke.h 
b/arch/powerpc/include/asm/reg_booke.h
index 464f108..75bda23 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -64,6 +64,7 @@
 #define SPRN_DVC2  0x13F   /* Data Value Compare Register 2 */
 #define SPRN_LPID  0x152   /* Logical Partition ID */
 #define SPRN_MAS8  0x155   /* MMU Assist Register 8 */
+#define SPRN_LRATCFG   0x156   /* LRAT Configuration Register */
 #define SPRN_TLB0PS0x158   /* TLB 0 Page Size Register */
 #define SPRN_TLB1PS0x159   /* TLB 1 Page Size Register */
 #define SPRN_MAS5_MAS6 0x15c   /* MMU Assist Register 5 || 6 */
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 94cd728..6492708 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -196,6 +196,14 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t 
phys, unsigned long virt,
get_paca()-tcd.esel_next = i;
get_paca()-tcd.esel_max = mfspr(SPRN_TLB1CFG)  TLBnCFG_N_ENTRY;
get_paca()-tcd.esel_first = i;
+
+   get_paca()-tcd.lrat_next = 0;
+   if (((mfspr(SPRN_MMUCFG)  MMUCFG_MAVN) == MMUCFG_MAVN_V2) 
+   (mfspr(SPRN_MMUCFG)  MMUCFG_LRAT)) {
+   get_paca()-tcd.lrat_max = mfspr(SPRN_LRATCFG)  LRATCFG_NENTRY;
+   } else {
+   get_paca()-tcd.lrat_max = 0;
+   }
 #endif
 
return amount_mapped;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 3/4] KVM: PPC: e500: TLB emulation for IND entries

2014-07-03 Thread Mihai Caraman
Handle indirect entries (IND) in TLB emulation code. The translation size of IND
entries differs from the size of the referred Page Tables (Linux guests now use IND
of 2MB for 4KB PTs) and this requires a careful tweak of the existing logic.

TLB search emulation requires additional search in HW TLB0 (since these entries
are directly added by HTW) and found entries should be presented to the guest 
with
RPN changed from PFN to GFN. There might be more GFNs pointing to the same PFN 
so
the only way to get the corresponding GFN is to search it in guest's PTE. If IND
entry for the corresponding PT is not available just invalidate guest's ea and
report a tlbsx miss. This patch only implements the invalidation and let a TODO
note for searching HW TLB0.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h |  2 +
 arch/powerpc/kvm/e500.h   | 81 ---
 arch/powerpc/kvm/e500_mmu.c   | 78 +++--
 arch/powerpc/kvm/e500_mmu_host.c  | 31 --
 arch/powerpc/kvm/e500mc.c | 53 +--
 5 files changed, 211 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index ac6acf7..e482ad8 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -59,6 +59,7 @@
 #define MAS1_IPROT 0x4000
 #define MAS1_TID(x)(((x)  16)  0x3FFF)
 #define MAS1_IND   0x2000
+#define MAS1_IND_SHIFT 13
 #define MAS1_TS0x1000
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
@@ -94,6 +95,7 @@
 #define MAS4_TLBSEL_MASK   MAS0_TLBSEL_MASK
 #define MAS4_TLBSELD(x)MAS0_TLBSEL(x)
 #define MAS4_INDD  0x8000  /* Default IND */
+#define MAS4_INDD_SHIFT15
 #define MAS4_TSIZED(x) MAS1_TSIZE(x)
 #define MAS4_X0D   0x0040
 #define MAS4_X1D   0x0020
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178..70a556d 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -148,6 +148,22 @@ unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 
*vcpu_e500,
 unsigned int pr, int avoid_recursion);
 #endif
 
+static inline bool has_feature(const struct kvm_vcpu *vcpu,
+  enum vcpu_ftr ftr)
+{
+   bool has_ftr;
+
+   switch (ftr) {
+   case VCPU_FTR_MMU_V2:
+   has_ftr = ((vcpu-arch.mmucfg  MMUCFG_MAVN) == MMUCFG_MAVN_V2);
+   break;
+
+   default:
+   return false;
+   }
+   return has_ftr;
+}
+
 /* TLB helper functions */
 static inline unsigned int
 get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
@@ -207,6 +223,16 @@ get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
return (tlbe-mas1  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT;
 }
 
+static inline unsigned int
+get_tlb_ind(const struct kvm_vcpu *vcpu,
+   const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (tlbe-mas1  MAS1_IND)  MAS1_IND_SHIFT;
+
+   return 0;
+}
+
 static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.pid  0xff;
@@ -232,6 +258,30 @@ static inline unsigned int get_cur_sas(const struct 
kvm_vcpu *vcpu)
return vcpu-arch.shared-mas6  0x1;
 }
 
+static inline unsigned int get_cur_ind(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas1  MAS1_IND)  MAS1_IND_SHIFT;
+
+   return 0;
+}
+
+static inline unsigned int get_cur_indd(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas4  MAS4_INDD)  MAS4_INDD_SHIFT;
+
+   return 0;
+}
+
+static inline unsigned int get_cur_sind(const struct kvm_vcpu *vcpu)
+{
+   if (has_feature(vcpu, VCPU_FTR_MMU_V2))
+   return (vcpu-arch.shared-mas6  MAS6_SIND)  MAS6_SIND_SHIFT;
+
+   return 0;
+}
+
 static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
 {
/*
@@ -286,6 +336,22 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 
*vcpu_e500,
 void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
 
 #ifdef CONFIG_KVM_BOOKE_HV
+void inval_tlb_on_host(struct kvm_vcpu *vcpu, int type, int pid);
+
+void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid, int sas,
+ int sind);
+#else
+/* TLB is fully virtualized */
+static inline void inval_tlb_on_host(struct kvm_vcpu *vcpu,
+int type, int pid)
+{}
+
+static inline void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid,
+   int sas, int sind)
+{}
+#endif
+
+#ifdef

[RFC PATCH 2/4] KVM: PPC: Book3E: Handle LRAT error exception

2014-07-03 Thread Mihai Caraman
Handle the LRAT error exception with support for LRAT mapping and invalidation.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/kvm_host.h   |   1 +
 arch/powerpc/include/asm/kvm_ppc.h|   2 +
 arch/powerpc/include/asm/mmu-book3e.h |   3 +
 arch/powerpc/include/asm/reg_booke.h  |  13 
 arch/powerpc/kernel/asm-offsets.c |   1 +
 arch/powerpc/kvm/booke.c  |  40 +++
 arch/powerpc/kvm/bookehv_interrupts.S |   9 ++-
 arch/powerpc/kvm/e500_mmu_host.c  | 125 ++
 arch/powerpc/kvm/e500mc.c |   2 +
 9 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b..7b6b2ec 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -433,6 +433,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u64 fault_lper;
 #endif
 
 #if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd..2730a29 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -86,6 +86,8 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned 
int gtlb_index,
   gva_t eaddr);
 extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu);
 
 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
 unsigned int id);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 088fd9f..ac6acf7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,6 +40,8 @@
 
 /* MAS registers bit definitions */
 
+#define MAS0_ATSEL 0x8000
+#define MAS0_ATSEL_SHIFT   31
 #define MAS0_TLBSEL_MASK0x3000
 #define MAS0_TLBSEL_SHIFT   28
 #define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
@@ -53,6 +55,7 @@
 #define MAS0_WQ_CLR_RSRV   0x2000
 
 #define MAS1_VALID 0x8000
+#define MAS1_VALID_SHIFT   31
 #define MAS1_IPROT 0x4000
 #define MAS1_TID(x)(((x)  16)  0x3FFF)
 #define MAS1_IND   0x2000
diff --git a/arch/powerpc/include/asm/reg_booke.h 
b/arch/powerpc/include/asm/reg_booke.h
index 75bda23..783d617 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -43,6 +43,8 @@
 
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_DECAR 0x036   /* Decrementer Auto Reload Register */
+#define SPRN_LPER  0x038   /* Logical Page Exception Register */
+#define SPRN_LPERU 0x039   /* Logical Page Exception Register Upper */
 #define SPRN_IVPR  0x03F   /* Interrupt Vector Prefix Register */
 #define SPRN_USPRG00x100   /* User Special Purpose Register General 0 */
 #define SPRN_SPRG3R0x103   /* Special Purpose Register General 3 Read */
@@ -358,6 +360,9 @@
 #define ESR_ILK0x0010  /* Instr. Cache Locking */
 #define ESR_PUO0x0004  /* Unimplemented Operation 
exception */
 #define ESR_BO 0x0002  /* Byte Ordering */
+#define ESR_DATA   0x0400  /* Page Table Data Access */
+#define ESR_TLBI   0x0200  /* Page Table TLB Ineligible */
+#define ESR_PT 0x0100  /* Page Table Translation */
 #define ESR_SPV0x0080  /* Signal Processing operation 
*/
 
 /* Bit definitions related to the DBCR0. */
@@ -649,6 +654,14 @@
 #define EPC_EPID   0x3fff
 #define EPC_EPID_SHIFT 0
 
+/* Bit definitions for LPER */
+#define LPER_ALPN  0x000FF000ULL
+#define LPER_ALPN_SHIFT12
+#define LPER_WIMGE 0x0F80
+#define LPER_WIMGE_SHIFT   7
+#define LPER_LPS   0x000F
+#define LPER_LPS_SHIFT 0
+
 /*
  * The IBM-403 is an even more odd special case, as it is much
  * older than the IBM-405 series.  We put these down here incase someone
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index f5995a9..be6e329 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -713,6 +713,7 @@ int main(void)
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+   DEFINE(VCPU_FAULT_LPER, offsetof(struct kvm_vcpu, arch.fault_lper));
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a192975..ab1077f 100644

[RFC PATCH 4/4] KVM: PPC: e500mc: Advertise E.PT to support HTW guests

2014-07-03 Thread Mihai Caraman
Enable E.PT for vcpus with MMU MAV 2.0 to support Hardware Page Tablewalk (HTW)
in guests.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500_mmu.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index b775e6a..1de0cd6 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -945,11 +945,7 @@ static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
vcpu-arch.tlbps[1] = mfspr(SPRN_TLB1PS);
 
vcpu-arch.mmucfg = ~MMUCFG_LRAT;
-
-   /* Guest mmu emulation currently doesn't handle E.PT */
-   vcpu-arch.eptcfg = 0;
-   vcpu-arch.tlbcfg[0] = ~TLBnCFG_PT;
-   vcpu-arch.tlbcfg[1] = ~TLBnCFG_IND;
+   vcpu-arch.eptcfg = mfspr(SPRN_EPTCFG);
}
 
return 0;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: e500: Fix default tlb for victim hint

2014-06-30 Thread Mihai Caraman
The TLB search operation used for the victim hint relies on the default TLB set by the
host. When hardware tablewalk support is enabled in the host, the default TLB is
TLB1, which leads KVM to evict the bolted entry. Set and restore the default TLB
when searching for the victim hint.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h | 5 -
 arch/powerpc/kvm/e500_mmu_host.c  | 4 
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 901dac6..5dad378 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,9 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x) (((x)  28)  0x3000)
+#define MAS0_TLBSEL_MASK0x3000
+#define MAS0_TLBSEL_SHIFT   28
+#define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -86,6 +88,7 @@
 #define MAS3_SPSIZE0x003e
 #define MAS3_SPSIZE_SHIFT  1
 
+#define MAS4_TLBSEL_MASK   MAS0_TLBSEL_MASK
 #define MAS4_TLBSELD(x)MAS0_TLBSEL(x)
 #define MAS4_INDD  0x8000  /* Default IND */
 #define MAS4_TSIZED(x) MAS1_TSIZE(x)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03..79677d7 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr)
 {
unsigned long flags;
u32 mas0;
+   u32 mas4;
 
local_irq_save(flags);
mtspr(SPRN_MAS6, 0);
+   mas4 = mfspr(SPRN_MAS4);
+   mtspr(SPRN_MAS4, mas4  ~MAS4_TLBSEL_MASK);
asm volatile(tlbsx 0, %0 : : b (eaddr  ~CONFIG_PAGE_OFFSET));
mas0 = mfspr(SPRN_MAS0);
+   mtspr(SPRN_MAS4, mas4);
local_irq_restore(flags);
 
return mas0;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: e500: Emulate power management control SPR

2014-06-30 Thread Mihai Caraman
For FSL e6500 core the kernel uses power management SPR register (PWRMGTCR0)
to enable idle power down for cores and devices by setting up the idle count
period at boot time. With the host already controlling the power management
configuration the guest could simply benefit from it, so emulate guest request
as nop.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500_emulate.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d517..98a22e5 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,10 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong spr_va
spr_val);
break;
 
+   case SPRN_PWRMGTCR0:
+   /* Guest relies on host power management configurations */
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -355,6 +359,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong *spr_v
*spr_val = 0;
break;
 
+   case SPRN_PWRMGTCR0:
+   *spr_val = 0;
+   break;
+
case SPRN_MMUCFG:
*spr_val = vcpu-arch.mmucfg;
break;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 6/6 v2] KVM: PPC: Book3E: Enable e6500 core

2014-06-30 Thread Mihai Caraman
Now that AltiVec support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - no changes

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index c60b653..0bc9684 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,6 +177,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the priviledge to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 5/6 v2] KVM: PPC: Book3E: Add ONE_REG AltiVec support

2014-06-30 Thread Mihai Caraman
Add ONE_REG support for AltiVec on Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |  5 +
 arch/powerpc/kvm/booke.c| 34 ++
 2 files changed, 39 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 2bc4a94..3adbce4 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accesses via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4ba75f6..fe15a94 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1634,6 +1634,23 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg-id, vcpu-arch.vrsave);
break;
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
+   break;
+#endif /* CONFIG_ALTIVEC */
+
default:
r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
break;
@@ -1717,6 +1734,23 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
case KVM_REG_PPC_VRSAVE:
vcpu-arch.vrsave = set_reg_val(reg-id, val);
break;
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0] = val.vval;
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   vcpu-arch.vr.vscr.u[3] = set_reg_val(reg-id, val);
+   break;
+#endif /* CONFIG_ALTIVEC */
+
default:
r = vcpu-kvm-arch.kvm_ops-set_one_reg(vcpu, reg-id, val);
break;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 2/6 v2] KVM: PPC: Book3E: Refactor SPE/FP exit handling

2014-06-30 Thread Mihai Caraman
SPE/FP/AltiVec interrupts share the same numbers. Refactor SPE/FP exit handling
to accommodate AltiVec later on the same flow. Add kvmppc_supports_spe() to 
detect
support for the unit at runtime since it can be configured in the kernel but not
featured on hardware and vice versa.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - enable SPE only if !HV  SPE

 arch/powerpc/kvm/booke.c | 93 +++-
 1 file changed, 60 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3c86d9b..80cd8df 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -91,6 +91,15 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
 }
 
+static inline bool kvmppc_supports_spe(void)
+{
+#ifdef CONFIG_SPE
+   if (cpu_has_feature(CPU_FTR_SPE))
+   return true;
+#endif
+   return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -976,49 +985,67 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
 
-#ifdef CONFIG_SPE
case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-   if (vcpu-arch.shared-msr  MSR_SPE)
-   kvmppc_vcpu_enable_spe(vcpu);
-   else
-   kvmppc_booke_queue_irqprio(vcpu,
-   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+   if (kvmppc_supports_spe()) {
+   bool enabled = false;
+
+#if !defined(CONFIG_KVM_BOOKE_HV)  defined(CONFIG_SPE)
+   if (vcpu-arch.shared-msr  MSR_SPE) {
+   kvmppc_vcpu_enable_spe(vcpu);
+   enabled = true;
+   }
+#endif
+   if (!enabled)
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+   } else {
+   /*
+* Guest wants SPE, but host kernel doesn't support it.
+* Send an unimplemented operation program check to
+* the guest.
+*/
+   kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+   }
+
r = RESUME_GUEST;
break;
}
 
case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-   kvmppc_booke_queue_irqprio(vcpu,
-   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
-   r = RESUME_GUEST;
-   break;
-
-   case BOOKE_INTERRUPT_SPE_FP_ROUND:
-   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
-   r = RESUME_GUEST;
-   break;
-#else
-   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
-   /*
-* Guest wants SPE, but host kernel doesn't support it.  Send
-* an unimplemented operation program check to the guest.
-*/
-   kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
-   r = RESUME_GUEST;
+   if (kvmppc_supports_spe()) {
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
+   r = RESUME_GUEST;
+   } else {
+   /*
+* These really should never happen without CONFIG_SPE,
+* as we should never enable the real MSR[SPE] in the
+* guest.
+*/
+   pr_crit(%s: unexpected SPE interrupt %u at %08lx\n,
+   __func__, exit_nr, vcpu-arch.pc);
+   run-hw.hardware_exit_reason = exit_nr;
+   r = RESUME_HOST;
+   }
break;
 
-   /*
-* These really should never happen without CONFIG_SPE,
-* as we should never enable the real MSR[SPE] in the guest.
-*/
-   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_INTERRUPT_SPE_FP_ROUND:
-   printk(KERN_CRIT %s: unexpected SPE interrupt %u at %08lx\n,
-  __func__, exit_nr, vcpu-arch.pc);
-   run-hw.hardware_exit_reason = exit_nr;
-   r = RESUME_HOST;
+   if (kvmppc_supports_spe()) {
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_ROUND);
+   r = RESUME_GUEST;
+   } else {
+   /*
+* These really should never happen without CONFIG_SPE,
+* as we should never enable the real MSR[SPE] in the
+* guest.
+*/
+   pr_crit(%s: unexpected SPE interrupt %u

[PATCH 4/6 v2] KVM: PPC: Book3E: Add AltiVec support

2014-06-30 Thread Mihai Caraman
Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuse host
infrastructure so follow the same approach for AltiVec.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c | 67 ++--
 1 file changed, 65 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4cc9b26..4ba75f6 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -100,6 +100,19 @@ static inline bool kvmppc_supports_spe(void)
return false;
 }
 
+/*
+ * Always returns true if AltiVec unit is present,
+ * see kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   return true;
+#endif
+   return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -178,6 +191,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (kvmppc_supports_altivec()) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (kvmppc_supports_altivec()) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -749,6 +796,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (kvmppc_supports_altivec())
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.shadow_dbg_reg;
switch_booke_debug_regs(debug);
@@ -771,6 +829,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1014,7 +1076,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
break;
 
case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_spe() || kvmppc_supports_altivec()) {
bool enabled = false;
 
 #if !defined(CONFIG_KVM_BOOKE_HV)  defined(CONFIG_SPE)
@@ -1040,7 +1102,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
}
 
case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_spe() || kvmppc_supports_altivec()) {
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
r = RESUME_GUEST;
@@ -1249,6 +1311,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu);
+   kvmppc_load_guest_altivec(vcpu);
}
}
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/6 v2] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers

2014-06-30 Thread Mihai Caraman
Use common BOOKE_IRQPRIO and BOOKE_INTERRUPT defines for SPE/FP/AltiVec
which share the same interrupt numbers.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - remove outdated definitions

 arch/powerpc/include/asm/kvm_asm.h|  8 
 arch/powerpc/kvm/booke.c  | 17 +
 arch/powerpc/kvm/booke.h  |  4 ++--
 arch/powerpc/kvm/booke_interrupts.S   |  9 +
 arch/powerpc/kvm/bookehv_interrupts.S |  4 ++--
 arch/powerpc/kvm/e500.c   | 10 ++
 arch/powerpc/kvm/e500_emulate.c   | 10 ++
 7 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_asm.h 
b/arch/powerpc/include/asm/kvm_asm.h
index 9601741..c94fd33 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -56,14 +56,6 @@
 /* E500 */
 #define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
 #define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
-/*
- * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
- */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109..3c86d9b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -388,8 +388,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
-   case BOOKE_IRQPRIO_SPE_UNAVAIL:
-   case BOOKE_IRQPRIO_SPE_FP_DATA:
+   case BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
@@ -977,18 +977,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
break;
 
 #ifdef CONFIG_SPE
-   case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
if (vcpu-arch.shared-msr  MSR_SPE)
kvmppc_vcpu_enable_spe(vcpu);
else
kvmppc_booke_queue_irqprio(vcpu,
-  BOOKE_IRQPRIO_SPE_UNAVAIL);
+   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
r = RESUME_GUEST;
break;
}
 
-   case BOOKE_INTERRUPT_SPE_FP_DATA:
-   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
+   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
r = RESUME_GUEST;
break;
 
@@ -997,7 +998,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
r = RESUME_GUEST;
break;
 #else
-   case BOOKE_INTERRUPT_SPE_UNAVAIL:
+   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
/*
 * Guest wants SPE, but host kernel doesn't support it.  Send
 * an unimplemented operation program check to the guest.
@@ -1010,7 +1011,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
 * These really should never happen without CONFIG_SPE,
 * as we should never enable the real MSR[SPE] in the guest.
 */
-   case BOOKE_INTERRUPT_SPE_FP_DATA:
+   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_INTERRUPT_SPE_FP_ROUND:
printk(KERN_CRIT %s: unexpected SPE interrupt %u at %08lx\n,
   __func__, exit_nr, vcpu-arch.pc);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index b632cd3..f182b32 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -32,8 +32,8 @@
 #define BOOKE_IRQPRIO_ALIGNMENT 2
 #define BOOKE_IRQPRIO_PROGRAM 3
 #define BOOKE_IRQPRIO_FP_UNAVAIL 4
-#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
-#define BOOKE_IRQPRIO_SPE_FP_DATA 6
+#define BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL 5
+#define BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST 6
 #define BOOKE_IRQPRIO_SPE_FP_ROUND 7
 #define BOOKE_IRQPRIO_SYSCALL 8
 #define BOOKE_IRQPRIO_AP_UNAVAIL 9
diff --git a/arch/powerpc/kvm/booke_interrupts.S 
b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5ef..a275dc5 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -137,8 +137,9 @@ KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG 
SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0

[PATCH 0/6 v2] KVM: PPC: Book3e: AltiVec support

2014-06-30 Thread Mihai Caraman
Add KVM Book3E AltiVec support and enable e6500 core.

Integrates Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue in January
and take into account feedback.

Mihai Caraman (6):
  KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers
  KVM: PPC: Book3E: Refactor SPE/FP exit handling
  KVM: PPC: Book3E: Increase FPU laziness
  KVM: PPC: Book3E: Add AltiVec support
  KVM: PPC: Book3E: Add ONE_REG AltiVec support
  KVM: PPC: Book3E: Enable e6500 core

 arch/powerpc/include/asm/kvm_asm.h|   8 --
 arch/powerpc/include/uapi/asm/kvm.h   |   5 +
 arch/powerpc/kvm/booke.c  | 238 --
 arch/powerpc/kvm/booke.h  |  38 +-
 arch/powerpc/kvm/booke_interrupts.S   |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S |   4 +-
 arch/powerpc/kvm/e500.c   |  10 +-
 arch/powerpc/kvm/e500_emulate.c   |  10 +-
 arch/powerpc/kvm/e500mc.c |  12 +-
 9 files changed, 232 insertions(+), 102 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 3/6 v2] KVM: PPC: Book3E: Increase FPU laziness

2014-06-30 Thread Mihai Caraman
Increase FPU laziness by calling kvmppc_load_guest_fp() just before
returning to guest instead of each sched in. Without this improvement
an interrupt may also claim the floating point unit, corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 80cd8df..4cc9b26 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -710,12 +744,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -739,8 +769,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1220,6 +1248,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f182b32..faad8af 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -123,40 +123,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 690499d..c60b653 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list: send

[PATCH] KVM: PPC: e500: Fix default tlb for victim hint

2014-06-30 Thread Mihai Caraman
Tlb search operation used for victim hint relies on the default tlb set by the
host. When hardware tablewalk support is enabled in the host, the default tlb is
TLB1 which leads KVM to evict the bolted entry. Set and restore the default tlb
when searching for victim hint.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/include/asm/mmu-book3e.h | 5 -
 arch/powerpc/kvm/e500_mmu_host.c  | 4 
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 901dac6..5dad378 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,9 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x) (((x)  28)  0x3000)
+#define MAS0_TLBSEL_MASK0x3000
+#define MAS0_TLBSEL_SHIFT   28
+#define MAS0_TLBSEL(x)  (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -86,6 +88,7 @@
 #define MAS3_SPSIZE0x003e
 #define MAS3_SPSIZE_SHIFT  1
 
+#define MAS4_TLBSEL_MASK   MAS0_TLBSEL_MASK
 #define MAS4_TLBSELD(x)MAS0_TLBSEL(x)
 #define MAS4_INDD  0x8000  /* Default IND */
 #define MAS4_TSIZED(x) MAS1_TSIZE(x)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03..79677d7 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr)
 {
unsigned long flags;
u32 mas0;
+   u32 mas4;
 
local_irq_save(flags);
mtspr(SPRN_MAS6, 0);
+   mas4 = mfspr(SPRN_MAS4);
+   mtspr(SPRN_MAS4, mas4  ~MAS4_TLBSEL_MASK);
asm volatile(tlbsx 0, %0 : : b (eaddr  ~CONFIG_PAGE_OFFSET));
mas0 = mfspr(SPRN_MAS0);
+   mtspr(SPRN_MAS4, mas4);
local_irq_restore(flags);
 
return mas0;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] KVM: PPC: e500: Emulate power management control SPR

2014-06-30 Thread Mihai Caraman
For FSL e6500 core the kernel uses power management SPR register (PWRMGTCR0)
to enable idle power down for cores and devices by setting up the idle count
period at boot time. With the host already controlling the power management
configuration the guest could simply benefit from it, so emulate guest request
as nop.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
 arch/powerpc/kvm/e500_emulate.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d517..98a22e5 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,10 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong spr_va
spr_val);
break;
 
+   case SPRN_PWRMGTCR0:
+   /* Guest relies on host power management configurations */
+   break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu-arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -355,6 +359,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, 
int sprn, ulong *spr_v
*spr_val = 0;
break;
 
+   case SPRN_PWRMGTCR0:
+   *spr_val = 0;
+   break;
+
case SPRN_MMUCFG:
*spr_val = vcpu-arch.mmucfg;
break;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 6/6 v2] KVM: PPC: Book3E: Enable e6500 core

2014-06-30 Thread Mihai Caraman
Now that AltiVec support is in place enable e6500 core.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - no changes

 arch/powerpc/kvm/e500mc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index c60b653..0bc9684 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,6 +177,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec-cpu_name, e5500) == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+   /*
+* Since guests have the priviledge to enable AltiVec, we need AltiVec
+* support in the host to save/restore their context.
+* Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+* because it's cleared in the absence of CONFIG_ALTIVEC!
+*/
+   else if (strcmp(cur_cpu_spec-cpu_name, e6500) == 0)
+   r = 0;
+#endif
else
r = -ENOTSUPP;
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 5/6 v2] KVM: PPC: Book3E: Add ONE_REG AltiVec support

2014-06-30 Thread Mihai Caraman
Add ONE_REG support for AltiVec on Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - add comment describing VCSR register representation in KVM vs kernel

 arch/powerpc/include/uapi/asm/kvm.h |  5 +
 arch/powerpc/kvm/booke.c| 34 ++
 2 files changed, 39 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 2bc4a94..3adbce4 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
 
 /* FP and vector status/control registers */
 #define KVM_REG_PPC_FPSCR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accessed via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
 #define KVM_REG_PPC_VSCR   (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 
 /* Virtual processor areas */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4ba75f6..fe15a94 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1634,6 +1634,23 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg-id, vcpu-arch.vrsave);
break;
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val.vval = vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0];
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   val = get_reg_val(reg-id, vcpu-arch.vr.vscr.u[3]);
+   break;
+#endif /* CONFIG_ALTIVEC */
+
default:
r = vcpu-kvm-arch.kvm_ops-get_one_reg(vcpu, reg-id, val);
break;
@@ -1717,6 +1734,23 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
case KVM_REG_PPC_VRSAVE:
vcpu-arch.vrsave = set_reg_val(reg-id, val);
break;
+#ifdef CONFIG_ALTIVEC
+   case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   vcpu-arch.vr.vr[reg-id - KVM_REG_PPC_VR0] = val.vval;
+   break;
+   case KVM_REG_PPC_VSCR:
+   if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   r = -ENXIO;
+   break;
+   }
+   vcpu-arch.vr.vscr.u[3] = set_reg_val(reg-id, val);
+   break;
+#endif /* CONFIG_ALTIVEC */
+
default:
r = vcpu-kvm-arch.kvm_ops-set_one_reg(vcpu, reg-id, val);
break;
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 2/6 v2] KVM: PPC: Book3E: Refactor SPE/FP exit handling

2014-06-30 Thread Mihai Caraman
SPE/FP/AltiVec interrupts share the same numbers. Refactor SPE/FP exit handling
to accommodate AltiVec later on the same flow. Add kvmppc_supports_spe() to 
detect
support for the unit at runtime since it can be configured in the kernel but not
featured on hardware and vice versa.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - enable SPE only if !HV  SPE

 arch/powerpc/kvm/booke.c | 93 +++-
 1 file changed, 60 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3c86d9b..80cd8df 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -91,6 +91,15 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
 }
 
+static inline bool kvmppc_supports_spe(void)
+{
+#ifdef CONFIG_SPE
+   if (cpu_has_feature(CPU_FTR_SPE))
+   return true;
+#endif
+   return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -976,49 +985,67 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
 
-#ifdef CONFIG_SPE
case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-   if (vcpu-arch.shared-msr  MSR_SPE)
-   kvmppc_vcpu_enable_spe(vcpu);
-   else
-   kvmppc_booke_queue_irqprio(vcpu,
-   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+   if (kvmppc_supports_spe()) {
+   bool enabled = false;
+
+#if !defined(CONFIG_KVM_BOOKE_HV)  defined(CONFIG_SPE)
+   if (vcpu-arch.shared-msr  MSR_SPE) {
+   kvmppc_vcpu_enable_spe(vcpu);
+   enabled = true;
+   }
+#endif
+   if (!enabled)
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+   } else {
+   /*
+* Guest wants SPE, but host kernel doesn't support it.
+* Send an unimplemented operation program check to
+* the guest.
+*/
+   kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+   }
+
r = RESUME_GUEST;
break;
}
 
case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-   kvmppc_booke_queue_irqprio(vcpu,
-   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
-   r = RESUME_GUEST;
-   break;
-
-   case BOOKE_INTERRUPT_SPE_FP_ROUND:
-   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
-   r = RESUME_GUEST;
-   break;
-#else
-   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
-   /*
-* Guest wants SPE, but host kernel doesn't support it.  Send
-* an unimplemented operation program check to the guest.
-*/
-   kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
-   r = RESUME_GUEST;
+   if (kvmppc_supports_spe()) {
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
+   r = RESUME_GUEST;
+   } else {
+   /*
+* These really should never happen without CONFIG_SPE,
+* as we should never enable the real MSR[SPE] in the
+* guest.
+*/
+   pr_crit(%s: unexpected SPE interrupt %u at %08lx\n,
+   __func__, exit_nr, vcpu-arch.pc);
+   run-hw.hardware_exit_reason = exit_nr;
+   r = RESUME_HOST;
+   }
break;
 
-   /*
-* These really should never happen without CONFIG_SPE,
-* as we should never enable the real MSR[SPE] in the guest.
-*/
-   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_INTERRUPT_SPE_FP_ROUND:
-   printk(KERN_CRIT %s: unexpected SPE interrupt %u at %08lx\n,
-  __func__, exit_nr, vcpu-arch.pc);
-   run-hw.hardware_exit_reason = exit_nr;
-   r = RESUME_HOST;
+   if (kvmppc_supports_spe()) {
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_ROUND);
+   r = RESUME_GUEST;
+   } else {
+   /*
+* These really should never happen without CONFIG_SPE,
+* as we should never enable the real MSR[SPE] in the
+* guest.
+*/
+   pr_crit(%s: unexpected SPE interrupt %u

[PATCH 1/6 v2] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers

2014-06-30 Thread Mihai Caraman
Use common BOOKE_IRQPRIO and BOOKE_INTERRUPT defines for SPE/FP/AltiVec
which share the same interrupt numbers.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - remove outdated definitions

 arch/powerpc/include/asm/kvm_asm.h|  8 
 arch/powerpc/kvm/booke.c  | 17 +
 arch/powerpc/kvm/booke.h  |  4 ++--
 arch/powerpc/kvm/booke_interrupts.S   |  9 +
 arch/powerpc/kvm/bookehv_interrupts.S |  4 ++--
 arch/powerpc/kvm/e500.c   | 10 ++
 arch/powerpc/kvm/e500_emulate.c   | 10 ++
 7 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_asm.h 
b/arch/powerpc/include/asm/kvm_asm.h
index 9601741..c94fd33 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -56,14 +56,6 @@
 /* E500 */
 #define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
 #define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
-/*
- * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
- */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
-   BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109..3c86d9b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -388,8 +388,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu 
*vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
-   case BOOKE_IRQPRIO_SPE_UNAVAIL:
-   case BOOKE_IRQPRIO_SPE_FP_DATA:
+   case BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL:
+   case BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
@@ -977,18 +977,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
break;
 
 #ifdef CONFIG_SPE
-   case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
if (vcpu-arch.shared-msr  MSR_SPE)
kvmppc_vcpu_enable_spe(vcpu);
else
kvmppc_booke_queue_irqprio(vcpu,
-  BOOKE_IRQPRIO_SPE_UNAVAIL);
+   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
r = RESUME_GUEST;
break;
}
 
-   case BOOKE_INTERRUPT_SPE_FP_DATA:
-   kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
+   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
+   kvmppc_booke_queue_irqprio(vcpu,
+   BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
r = RESUME_GUEST;
break;
 
@@ -997,7 +998,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
r = RESUME_GUEST;
break;
 #else
-   case BOOKE_INTERRUPT_SPE_UNAVAIL:
+   case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
/*
 * Guest wants SPE, but host kernel doesn't support it.  Send
 * an unimplemented operation program check to the guest.
@@ -1010,7 +1011,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
 * These really should never happen without CONFIG_SPE,
 * as we should never enable the real MSR[SPE] in the guest.
 */
-   case BOOKE_INTERRUPT_SPE_FP_DATA:
+   case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
case BOOKE_INTERRUPT_SPE_FP_ROUND:
printk(KERN_CRIT %s: unexpected SPE interrupt %u at %08lx\n,
   __func__, exit_nr, vcpu-arch.pc);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index b632cd3..f182b32 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -32,8 +32,8 @@
 #define BOOKE_IRQPRIO_ALIGNMENT 2
 #define BOOKE_IRQPRIO_PROGRAM 3
 #define BOOKE_IRQPRIO_FP_UNAVAIL 4
-#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
-#define BOOKE_IRQPRIO_SPE_FP_DATA 6
+#define BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL 5
+#define BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST 6
 #define BOOKE_IRQPRIO_SPE_FP_ROUND 7
 #define BOOKE_IRQPRIO_SYSCALL 8
 #define BOOKE_IRQPRIO_AP_UNAVAIL 9
diff --git a/arch/powerpc/kvm/booke_interrupts.S 
b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5ef..a275dc5 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -137,8 +137,9 @@ KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG 
SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0

[PATCH 4/6 v2] KVM: PPC: Book3E: Add AltiVec support

2014-06-30 Thread Mihai Caraman
Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuse host
infrastructure so follow the same approach for AltiVec.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - integrate Paul's FP/VMX/VSX changes

 arch/powerpc/kvm/booke.c | 67 ++--
 1 file changed, 65 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4cc9b26..4ba75f6 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -100,6 +100,19 @@ static inline bool kvmppc_supports_spe(void)
return false;
 }
 
+/*
+ * Always returns true if AltiVec unit is present,
+ * see kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   return true;
+#endif
+   return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -178,6 +191,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (kvmppc_supports_altivec()) {
+   if (!(current-thread.regs-msr  MSR_VEC)) {
+   enable_kernel_altivec();
+   load_vr_state(vcpu-arch.vr);
+   current-thread.vr_save_area = vcpu-arch.vr;
+   current-thread.regs-msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (kvmppc_supports_altivec()) {
+   if (current-thread.regs-msr  MSR_VEC)
+   giveup_altivec(current);
+   current-thread.vr_save_area = NULL;
+   }
+#endif
+}
+
 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
 {
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -749,6 +796,17 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   /* Save userspace AltiVec state in stack */
+   if (kvmppc_supports_altivec())
+   enable_kernel_altivec();
+   /*
+* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+* as always using the AltiVec.
+*/
+   kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
debug = vcpu-arch.shadow_dbg_reg;
switch_booke_debug_regs(debug);
@@ -771,6 +829,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   kvmppc_save_guest_altivec(vcpu);
+#endif
+
 out:
vcpu-mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1014,7 +1076,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
break;
 
case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_spe() || kvmppc_supports_altivec()) {
bool enabled = false;
 
 #if !defined(CONFIG_KVM_BOOKE_HV)  defined(CONFIG_SPE)
@@ -1040,7 +1102,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
}
 
case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_spe() || kvmppc_supports_altivec()) {
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
r = RESUME_GUEST;
@@ -1249,6 +1311,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu);
+   kvmppc_load_guest_altivec(vcpu);
}
}
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 0/6 v2] KVM: PPC: Book3e: AltiVec support

2014-06-30 Thread Mihai Caraman
Add KVM Book3E AltiVec support and enable e6500 core.

Integrates Paul's FP/VMX/VSX changes that landed in kvm-ppc-queue in January
and take into account feedback.

Mihai Caraman (6):
  KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers
  KVM: PPC: Book3E: Refactor SPE/FP exit handling
  KVM: PPC: Book3E: Increase FPU laziness
  KVM: PPC: Book3E: Add AltiVec support
  KVM: PPC: Book3E: Add ONE_REG AltiVec support
  KVM: PPC: Book3E: Enable e6500 core

 arch/powerpc/include/asm/kvm_asm.h|   8 --
 arch/powerpc/include/uapi/asm/kvm.h   |   5 +
 arch/powerpc/kvm/booke.c  | 238 --
 arch/powerpc/kvm/booke.h  |  38 +-
 arch/powerpc/kvm/booke_interrupts.S   |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S |   4 +-
 arch/powerpc/kvm/e500.c   |  10 +-
 arch/powerpc/kvm/e500_emulate.c   |  10 +-
 arch/powerpc/kvm/e500mc.c |  12 +-
 9 files changed, 232 insertions(+), 102 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 3/6 v2] KVM: PPC: Book3E: Increase FPU laziness

2014-06-30 Thread Mihai Caraman
Increase FPU laziness by calling kvmppc_load_guest_fp() just before
returning to guest instead of each sched in. Without this improvement
an interrupt may also claim the floating point unit, corrupting guest state.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v2:
 - remove fpu_active
 - add descriptive comments

 arch/powerpc/kvm/booke.c  | 43 ---
 arch/powerpc/kvm/booke.h  | 34 --
 arch/powerpc/kvm/e500mc.c |  2 --
 3 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 80cd8df..4cc9b26 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (!(current-thread.regs-msr  MSR_FP)) {
+   enable_kernel_fp();
+   load_fp_state(vcpu-arch.fp);
+   current-thread.fp_save_area = vcpu-arch.fp;
+   current-thread.regs-msr |= MSR_FP;
+   }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+   if (current-thread.regs-msr  MSR_FP)
+   giveup_fpu(current);
+   current-thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU)  !defined(CONFIG_KVM_BOOKE_HV)
@@ -710,12 +744,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
/*
 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-* as always using the FPU.  Kernel usage of FP (via
-* enable_kernel_fp()) in this thread must not occur while
-* vcpu-fpu_active is set.
+* as always using the FPU.
 */
-   vcpu-fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -739,8 +769,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
-
-   vcpu-fpu_active = 0;
 #endif
 
 out:
@@ -1220,6 +1248,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+   kvmppc_load_guest_fp(vcpu);
}
}
 
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f182b32..faad8af 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -123,40 +123,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu 
*vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
  ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  !(current-thread.regs-msr  MSR_FP)) {
-   enable_kernel_fp();
-   load_fp_state(vcpu-arch.fp);
-   current-thread.fp_save_area = vcpu-arch.fp;
-   current-thread.regs-msr |= MSR_FP;
-   }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-   if (vcpu-fpu_active  (current-thread.regs-msr  MSR_FP))
-   giveup_fpu(current);
-   current-thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 690499d..c60b653 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu 
*vcpu, int cpu)
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu-kvm-arch.lpid] = vcpu;
}
-
-   kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
-- 
1.7.11.7

--
To unsubscribe from this list: send

[PATCH 1/5 v4] KVM: PPC: e500mc: Revert add load inst fixup

2014-06-27 Thread Mihai Caraman
The commit 1d628af7 add load inst fixup made an attempt to handle
failures generated by reading the guest current instruction. The fixup
code that was added works by chance hiding the real issue.

Load external pid (lwepx) instruction, used by KVM to read guest
instructions, is executed in a substituted guest translation context
(EPLC[EGS] = 1). In consequence lwepx's TLB error and data storage
interrupts need to be handled by KVM, even though these interrupts
are generated from host context (MSR[GS] = 0) where lwepx is executed.

Currently, KVM hooks only interrupts generated from guest context
(MSR[GS] = 1), doing minimal checks on the fast path to avoid host
performance degradation. As a result, the host kernel handles lwepx
faults searching the faulting guest data address (loaded in DEAR) in
its own Logical Partition ID (LPID) 0 context. In case a host translation
is found the execution returns to the lwepx instruction instead of the
fixup, the host ending up in an infinite loop.

Revert the commit add load inst fixup. lwepx issue will be addressed
in a subsequent patch without needing fixup code.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4-v2:
 - no change

 arch/powerpc/kvm/bookehv_interrupts.S | 26 +-
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S 
b/arch/powerpc/kvm/bookehv_interrupts.S
index a1712b8..6ff4480 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -29,7 +29,6 @@
 #include asm/asm-compat.h
 #include asm/asm-offsets.h
 #include asm/bitsperlong.h
-#include asm/thread_info.h
 
 #ifdef CONFIG_64BIT
 #include asm/exception-64e.h
@@ -164,32 +163,9 @@
PPC_STL r30, VCPU_GPR(R30)(r4)
PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr   SPRN_EPLC, r8
-
-   /* disable preemption, so we are sure we hit the fixup handler */
-   CURRENT_THREAD_INFO(r8, r1)
-   li  r7, 1
-   stw r7, TI_PREEMPT(r8)
-
isync
-
-   /*
-* In case the read goes wrong, we catch it and write an invalid value
-* in LAST_INST instead.
-*/
-1: lwepx   r9, 0, r5
-2:
-.section .fixup, ax
-3: li  r9, KVM_INST_FETCH_FAILED
-   b   2b
-.previous
-.section __ex_table,a
-   PPC_LONG_ALIGN
-   PPC_LONG 1b,3b
-.previous
-
+   lwepx   r9, 0, r5
mtspr   SPRN_EPLC, r3
-   li  r7, 0
-   stw r7, TI_PREEMPT(r8)
stw r9, VCPU_LAST_INST(r4)
.endif
 
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 5/5 v4] KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

2014-06-27 Thread Mihai Caraman
On book3e, KVM uses load external pid (lwepx) dedicated instruction to read
guest last instruction on the exit path. lwepx exceptions (DTLB_MISS, DSI
and LRAT), generated by loading a guest address, needs to be handled by KVM.
These exceptions are generated in a substituted guest translation context
(EPLC[EGS] = 1) from host context (MSR[GS] = 0).

Currently, KVM hooks only interrupts generated from guest context (MSR[GS] = 1),
doing minimal checks on the fast path to avoid host performance degradation.
lwepx exceptions originate from host state (MSR[GS] = 0) which implies
additional checks in DO_KVM macro (beside the current MSR[GS] = 1) by looking
at the Exception Syndrome Register (ESR[EPID]) and the External PID Load Context
Register (EPLC[EGS]). Doing this on each Data TLB miss exception is obviously
too intrusive for the host.

Read guest last instruction from kvmppc_load_last_inst() by searching for the
physical address and kmap it. This addresses the TODO for TLB eviction and
execute-but-not-read entries, and allows us to get rid of lwepx until we are
able to handle failures.

A simple stress benchmark shows a 1% sys performance degradation compared with
previous approach (lwepx without failure handling):

time for i in `seq 1 1`; do /bin/echo  /dev/null; done

real0m 8.85s
user0m 4.34s
sys 0m 4.48s

vs

real0m 8.84s
user0m 4.36s
sys 0m 4.44s

An alternative solution, to handle lwepx exceptions in KVM, is to temporarily
hijack the interrupt vector from the host. Some cores share host IVOR registers
between hardware threads, which is the case of FSL e6500, which impose 
additional
synchronization logic for this solution to work. The optimization can be 
addressed
later on top of this patch.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - add switch and new function when getting last inst earlier
 - use enum instead of the previous semantics
 - get rid of mas0, optimize mas7_mas3
 - give more context in visible messages
 - check storage attributes mismatch on MMUv2
 - get rid of pfn_valid check

v3:
 - reworked patch description
 - use unaltered kmap addr for kunmap
 - get last instruction before being preempted

v2:
 - reworked patch description
 - used pr_* functions
 - addressed cosmetic feedback

 arch/powerpc/kvm/booke.c  | 44 +
 arch/powerpc/kvm/bookehv_interrupts.S | 37 --
 arch/powerpc/kvm/e500_mmu_host.c  | 91 +++
 3 files changed, 144 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 34a42b9..843077b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -869,6 +869,28 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ enum emulation_result emulated, u32 last_inst)
+{
+   switch (emulated) {
+   case EMULATE_AGAIN:
+   return RESUME_GUEST;
+
+   case EMULATE_FAIL:
+   pr_debug(%s: load instruction from guest address %lx failed\n,
+  __func__, vcpu-arch.pc);
+   /* For debugging, encode the failing instruction and
+* report it to userspace. */
+   run-hw.hardware_exit_reason = ~0ULL  32;
+   run-hw.hardware_exit_reason |= last_inst;
+   kvmppc_core_queue_program(vcpu, ESR_PIL);
+   return RESUME_HOST;
+
+   default:
+   BUG();
+   }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -880,6 +902,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu 
*vcpu,
int r = RESUME_HOST;
int s;
int idx;
+   u32 last_inst = KVM_INST_FETCH_FAILED;
+   enum emulation_result emulated = EMULATE_DONE;
 
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -887,6 +911,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
 
+   /*
+* get last instruction before beeing preempted
+* TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR  ESR_DATA
+*/
+   switch (exit_nr) {
+   case BOOKE_INTERRUPT_DATA_STORAGE:
+   case BOOKE_INTERRUPT_DTLB_MISS:
+   case BOOKE_INTERRUPT_HV_PRIV:
+   emulated = kvmppc_get_last_inst(vcpu, false, last_inst);
+   break;
+   default:
+   break;
+   }
+
local_irq_enable();
 
trace_kvm_exit(exit_nr, vcpu);
@@ -895,6 +933,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
run-exit_reason = KVM_EXIT_UNKNOWN;
run-ready_for_interrupt_injection = 1;
 
+   if (emulated != EMULATE_DONE) {
+   r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst

[PATCH 3/5 v4] KVM: PPC: Book3s: Remove kvmppc_read_inst() function

2014-06-27 Thread Mihai Caraman
In the context of replacing kvmppc_ld() function calls with a version of
kvmppc_get_last_inst() which allow to fail, Alex Graf suggested this:

If we get EMULATE_AGAIN, we just have to make sure we go back into the guest.
No need to inject an ISI into the guest - it'll do that all by itself.
With an error-returning kvmppc_get_last_inst we can just completely
get rid of kvmppc_read_inst() and only use kvmppc_get_last_inst() instead.

As an intermediate step get rid of kvmppc_read_inst() and only use kvmppc_ld()
instead.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - new patch

 arch/powerpc/kvm/book3s_pr.c | 85 ++--
 1 file changed, 35 insertions(+), 50 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 15fd6c2..d247d88 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -665,42 +665,6 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong 
fac)
 #endif
 }
 
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
-   ulong srr0 = kvmppc_get_pc(vcpu);
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
-   int ret;
-
-   ret = kvmppc_ld(vcpu, srr0, sizeof(u32), last_inst, false);
-   if (ret == -ENOENT) {
-   ulong msr = kvmppc_get_msr(vcpu);
-
-   msr = kvmppc_set_field(msr, 33, 33, 1);
-   msr = kvmppc_set_field(msr, 34, 36, 0);
-   msr = kvmppc_set_field(msr, 42, 47, 0);
-   kvmppc_set_msr_fast(vcpu, msr);
-   kvmppc_book3s_queue_irqprio(vcpu, 
BOOK3S_INTERRUPT_INST_STORAGE);
-   return EMULATE_AGAIN;
-   }
-
-   return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
-   /* Need to do paired single emulation? */
-   if (!(vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE))
-   return EMULATE_DONE;
-
-   /* Read out the instruction */
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
-   /* Need to emulate */
-   return EMULATE_FAIL;
-
-   return EMULATE_AGAIN;
-}
-
 /* Handle external providers (FPU, Altivec, VSX) */
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 ulong msr)
@@ -1101,31 +1065,51 @@ program_interrupt:
case BOOK3S_INTERRUPT_VSX:
{
int ext_msr = 0;
+   int emul;
+   ulong pc;
+   u32 last_inst;
 
-   switch (exit_nr) {
-   case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
-   case BOOK3S_INTERRUPT_ALTIVEC:ext_msr = MSR_VEC; break;
-   case BOOK3S_INTERRUPT_VSX:ext_msr = MSR_VSX; break;
-   }
+   if (!(vcpu-arch.hflags  BOOK3S_HFLAG_PAIRED_SINGLE)) {
+   /* Do paired single emulation */
+
+   switch (exit_nr) {
+   case BOOK3S_INTERRUPT_FP_UNAVAIL:
+   ext_msr = MSR_FP;
+   break;
+
+   case BOOK3S_INTERRUPT_ALTIVEC:
+   ext_msr = MSR_VEC;
+   break;
+
+   case BOOK3S_INTERRUPT_VSX:
+   ext_msr = MSR_VSX;
+   break;
+   }
 
-   switch (kvmppc_check_ext(vcpu, exit_nr)) {
-   case EMULATE_DONE:
-   /* everything ok - let's enable the ext */
r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
break;
-   case EMULATE_FAIL:
+   }
+
+   pc = kvmppc_get_pc(vcpu);
+   last_inst = kvmppc_get_last_inst(vcpu);
+   emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst, false);
+   if (emul == EMULATE_DONE) {
/* we need to emulate this instruction */
goto program_interrupt;
break;
-   default:
-   /* nothing to worry about - go again */
-   break;
+   } else {
+   r = RESUME_GUEST;
}
+
break;
}
case BOOK3S_INTERRUPT_ALIGNMENT:
-   if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   {
+   ulong pc = kvmppc_get_pc(vcpu);
+   u32 last_inst = kvmppc_get_last_inst(vcpu);
+   int emul = kvmppc_ld(vcpu, pc, sizeof(u32), last_inst, false);
+
+   if (emul == EMULATE_DONE) {
u32 dsisr;
u64 dar;
 
@@ -1139,6 +1123,7 @@ program_interrupt:
}
r = RESUME_GUEST;
break;
+   }
 #ifdef CONFIG_PPC_BOOK3S_64

[PATCH 2/5 v4] KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1

2014-06-27 Thread Mihai Caraman
Add missing defines MAS0_GET_TLBSEL() and MAS1_GET_TSIZE() for Book3E.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4-v2:
 - no change

 arch/powerpc/include/asm/mmu-book3e.h | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h 
b/arch/powerpc/include/asm/mmu-book3e.h
index 901dac6..60a949a 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,11 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x) (((x)  28)  0x3000)
+#define MAS0_TLBSEL_MASK   0x3000
+#define MAS0_TLBSEL_SHIFT  28
+#define MAS0_TLBSEL(x) (((x)  MAS0_TLBSEL_SHIFT)  MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0)  (((mas0)  MAS0_TLBSEL_MASK)  \
+   MAS0_TLBSEL_SHIFT)
 #define MAS0_ESEL_MASK 0x0FFF
 #define MAS0_ESEL_SHIFT16
 #define MAS0_ESEL(x)   (((x)  MAS0_ESEL_SHIFT)  MAS0_ESEL_MASK)
@@ -58,6 +62,7 @@
 #define MAS1_TSIZE_MASK0x0f80
 #define MAS1_TSIZE_SHIFT   7
 #define MAS1_TSIZE(x)  (((x)  MAS1_TSIZE_SHIFT)  MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1)   (((mas1)  MAS1_TSIZE_MASK)  MAS1_TSIZE_SHIFT)
 
 #define MAS2_EPN   (~0xFFFUL)
 #define MAS2_X00x0040
-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 4/5 v4] KVM: PPC: Alow kvmppc_get_last_inst() to fail

2014-06-27 Thread Mihai Caraman
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture-specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman mihai.cara...@freescale.com
---
v4:
 - these changes compile on book3s, please validate the functionality and
   do the necessary adaptations!
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h| 26 --
 arch/powerpc/include/asm/kvm_booke.h |  5 
 arch/powerpc/include/asm/kvm_ppc.h   | 24 +
 arch/powerpc/kvm/book3s.c| 11 
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 17 
 arch/powerpc/kvm/book3s_paired_singles.c | 38 +--
 arch/powerpc/kvm/book3s_pr.c | 45 
 arch/powerpc/kvm/booke.c |  3 +++
 arch/powerpc/kvm/e500_mmu_host.c |  6 +
 arch/powerpc/kvm/emulate.c   | 18 -
 arch/powerpc/kvm/powerpc.c   | 11 ++--
 11 files changed, 128 insertions(+), 76 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index ceb70aa..1300cd9 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return (kvmppc_get_msr(vcpu)  MSR_LE) != (MSR_KERNEL  MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong 
pc)
-{
-   /* Load the instruction manually if it failed to do so in the
-* exit path */
-   if (vcpu-arch.last_inst == KVM_INST_FETCH_FAILED)
-   kvmppc_ld(vcpu, pc, sizeof(u32), vcpu-arch.last_inst, false);
-
-   return kvmppc_need_byteswap(vcpu) ? swab32(vcpu-arch.last_inst) :
-   vcpu-arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-   return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
return vcpu-arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h 
b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu 
*vcpu)
return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-   return vcpu-arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
vcpu-arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..ec326c8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
EMULATE_EXIT_USER,/* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+   INST_GENERIC,
+   INST_SC,/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
   u64 val, unsigned int bytes,
   int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -234,6 +242,22 @@ struct kvmppc_ops {
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern

[PATCH 0/5 v4] Read guest last instruction from kvmppc_get_last_inst()

2014-06-27 Thread Mihai Caraman
Read guest last instruction from kvmppc_get_last_inst() allowing the function
to fail in order to emulate again. On bookehv architecture search for
the physical address and kmap it, instead of using Load External PID (lwepx)
instruction. This fixes an infinite loop caused by lwepx's data TLB miss
exception handled in the host and the TODO for execute-but-not-read entries
and TLB eviction.

Mihai Caraman (5):
  KVM: PPC: e500mc: Revert add load inst fixup
  KVM: PPC: Book3e: Add TLBSEL/TSIZE defines for MAS0/1
  KVM: PPC: Book3s: Remove kvmppc_read_inst() function
  KVM: PPC: Alow kvmppc_get_last_inst() to fail
  KVM: PPC: Bookehv: Get vcpu's last instruction for emulation

 arch/powerpc/include/asm/kvm_book3s.h|  26 ---
 arch/powerpc/include/asm/kvm_booke.h |   5 --
 arch/powerpc/include/asm/kvm_ppc.h   |  24 +++
 arch/powerpc/include/asm/mmu-book3e.h|   7 +-
 arch/powerpc/kvm/book3s.c|  11 +++
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  17 ++---
 arch/powerpc/kvm/book3s_paired_singles.c |  38 ++
 arch/powerpc/kvm/book3s_pr.c | 116 +--
 arch/powerpc/kvm/booke.c |  47 +
 arch/powerpc/kvm/bookehv_interrupts.S|  55 ++-
 arch/powerpc/kvm/e500_mmu_host.c |  97 ++
 arch/powerpc/kvm/emulate.c   |  18 +++--
 arch/powerpc/kvm/powerpc.c   |  10 ++-
 13 files changed, 302 insertions(+), 169 deletions(-)

-- 
1.7.11.7

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


  1   2   3   4   >