RE: [RFC PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support

2013-06-05 Thread Caraman Mihai Claudiu-B02008
  + * Simulate AltiVec unavailable fault to load guest state
  + * from thread to AltiVec unit.
  + * Must be called with preemption disabled.
  + */
  +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
  +{
  +#ifdef CONFIG_ALTIVEC
  +   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
  +   if (!(current->thread.regs->msr & MSR_VEC)) {
  +   load_up_altivec(NULL);
  +   current->thread.regs->msr |= MSR_VEC;
  +   }
  +   }
  +#endif
 
 Why not use kvmppc_supports_altivec()?  In fact, there's nothing
 KVM-specific about these functions...

I will do so, I had this code before kvmppc_supports_altivec() :)
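
Roughly, the reworked helper would look something like this (just an untested
sketch reusing kvmppc_supports_altivec() from this patch; the CONFIG_ALTIVEC
guard stays so load_up_altivec()/MSR_VEC are only referenced when AltiVec is
built in, and it must still be called with preemption disabled):

	static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
	{
	#ifdef CONFIG_ALTIVEC
		/* Let the shared helper do the feature check */
		if (kvmppc_supports_altivec() &&
		    !(current->thread.regs->msr & MSR_VEC)) {
			load_up_altivec(NULL);
			current->thread.regs->msr |= MSR_VEC;
		}
	#endif
	}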

   static inline bool kvmppc_supports_spe(void)
   {
   #ifdef CONFIG_SPE
  @@ -947,7 +1016,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
   */
  bool handled = false;
 
  -   if (kvmppc_supports_spe()) {
  +   if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
   #ifdef CONFIG_SPE
  if (cpu_has_feature(CPU_FTR_SPE))
   if (vcpu->arch.shared->msr & MSR_SPE) {
 
 The distinction between how you're handling SPE and Altivec here
 doesn't really have anything to do with SPE versus Altivec -- it's
 PR-mode versus HV-mode.

I was misled by the MSR_SPE bit, we should rename it to MSR_SPV.
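
Just to illustrate the rename idea (a hypothetical sketch, not a real patch):
the bit value stays the same, only the name would reflect the shared
SP/Embedded FP/Vector meaning:

	/* Hypothetical alias, for illustration only */
	#define MSR_SPV	MSR_SPE

		if (vcpu->arch.shared->msr & MSR_SPV) {
			/* guest enabled the shared SPE/AltiVec unit */
		}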

-Mike


Re: [RFC PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support

2013-06-04 Thread Scott Wood

On 06/03/2013 03:54:26 PM, Mihai Caraman wrote:
KVM Book3E FPU support gracefully reuses host infrastructure, so we do the
same for AltiVec. To keep AltiVec lazy, call kvmppc_load_guest_altivec()
only when returning to the guest instead of on each sched-in.

Signed-off-by: Mihai Caraman <mihai.cara...@freescale.com>
---
 arch/powerpc/kvm/booke.c  |   74 +++-
 arch/powerpc/kvm/e500mc.c |8 +
 2 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c08b04b..01eb635 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,23 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)

 }

 /*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * Must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current->thread.regs->msr & MSR_VEC)) {
+   load_up_altivec(NULL);
+   current->thread.regs->msr |= MSR_VEC;
+   }
+   }
+#endif


Why not use kvmppc_supports_altivec()?  In fact, there's nothing  
KVM-specific about these functions...



+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   return true;
+#endif
+   return false;
+}


Whitespace


 static inline bool kvmppc_supports_spe(void)
 {
 #ifdef CONFIG_SPE
@@ -947,7 +1016,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,

 */
bool handled = false;

-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {

 #ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE))
if (vcpu->arch.shared->msr & MSR_SPE) {
@@ -976,7 +1045,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 * The interrupt is shared, KVM support for the featured unit
 * is detected at run-time.
 */
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {

kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);

r = RESUME_GUEST;


The distinction between how you're handling SPE and Altivec here  
doesn't really have anything to do with SPE versus Altivec -- it's  
PR-mode versus HV-mode.


@@ -1188,6 +1257,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);

} else {
kvmppc_lazy_ee_enable();
+   kvmppc_load_guest_altivec(vcpu);
}
}



Why do you need to call an Altivec function here if we don't need to  
call an ordinary FPU function here?


-Scott


[RFC PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support

2013-06-03 Thread Mihai Caraman
KVM Book3E FPU support gracefully reuses host infrastructure, so we do the
same for AltiVec. To keep AltiVec lazy, call kvmppc_load_guest_altivec()
only when returning to the guest instead of on each sched-in.

Signed-off-by: Mihai Caraman <mihai.cara...@freescale.com>
---
 arch/powerpc/kvm/booke.c  |   74 +++-
 arch/powerpc/kvm/e500mc.c |8 +
 2 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c08b04b..01eb635 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,23 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * Must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   if (!(current->thread.regs->msr & MSR_VEC)) {
+   load_up_altivec(NULL);
+   current->thread.regs->msr |= MSR_VEC;
+   }
+   }
+#endif
+}
+
+/*
  * Helper function for full MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -661,6 +678,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   vector128 vr[32];
+   vector128 vscr;
+   int used_vr = 0;
+#endif
+
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
@@ -699,6 +722,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   /* Save userspace VEC state in stack */
+   enable_kernel_altivec();
+   memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+   vscr = current->thread.vscr;
+   used_vr = current->thread.used_vr;
+
+   /* Restore guest VEC state to thread */
+   memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+   current->thread.vscr = vcpu->arch.vscr;
+
+   kvmppc_load_guest_altivec(vcpu);
+   }
+#endif
+
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -719,6 +758,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+   /* Save AltiVec state to thread */
+   if (current->thread.regs->msr & MSR_VEC)
+   giveup_altivec(current);
+
+   /* Save guest state */
+   memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+   vcpu->arch.vscr = current->thread.vscr;
+
+   /* Restore userspace state */
+   memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+   current->thread.vscr = vscr;
+   current->thread.used_vr = used_vr;
+   }
+#endif
+
 out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -822,6 +878,19 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+   if (cpu_has_feature(CPU_FTR_ALTIVEC))
+   return true;
+#endif
+   return false;
+}
+
 static inline bool kvmppc_supports_spe(void)
 {
 #ifdef CONFIG_SPE
@@ -947,7 +1016,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 */
bool handled = false;
 
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 #ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE))
if (vcpu->arch.shared->msr & MSR_SPE) {
@@ -976,7 +1045,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 * The interrupt is shared, KVM support for the featured unit
 * is detected at run-time.
 */
-   if (kvmppc_supports_spe()) {
+   if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
r = RESUME_GUEST;
@@ -1188,6 +1257,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
} else {
kvmppc_lazy_ee_enable();
+   kvmppc_load_guest_altivec(vcpu);