Add wrapper functions for accessing the VR (Altivec vector) registers
and VSCR, to prepare for supporting PAPR nested guests.

Signed-off-by: Jordan Niethe <j...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/kvm_book3s.h | 20 +++++++++++
 arch/powerpc/kvm/powerpc.c            | 50 +++++++++++++--------------
 2 files changed, 45 insertions(+), 25 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a632e79639f0..77653c5b356b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -444,6 +444,26 @@ static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 v
        vcpu->arch.fp.fpr[i][j] = val;
 }
 
+static inline vector128 kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i)
+{
+       return vcpu->arch.vr.vr[i];
+}
+
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 val)
+{
+       vcpu->arch.vr.vr[i] = val;
+}
+
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.vr.vscr.u[3];
+}
+
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+       vcpu->arch.vr.vscr.u[3] = val;
+}
+
 #define BOOK3S_WRAPPER_SET(reg, size)                                  \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)        \
 {                                                                      \
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9468df8d9987..c1084d40e292 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -932,9 +932,9 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
                return;
 
        if (index >= 32) {
-               val.vval = VCPU_VSX_VR(vcpu, index - 32);
+               val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                val.vsxval[offset] = gpr;
-               VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+               kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
        } else {
                kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
        }
@@ -947,10 +947,10 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
        if (index >= 32) {
-               val.vval = VCPU_VSX_VR(vcpu, index - 32);
+               val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                val.vsxval[0] = gpr;
                val.vsxval[1] = gpr;
-               VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+               kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
        } else {
                kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
                kvmppc_set_vsx_fpr(vcpu, index, 1,  gpr);
@@ -968,7 +968,7 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
                val.vsx32val[1] = gpr;
                val.vsx32val[2] = gpr;
                val.vsx32val[3] = gpr;
-               VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+               kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
        } else {
                val.vsx32val[0] = gpr;
                val.vsx32val[1] = gpr;
@@ -989,9 +989,9 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
                return;
 
        if (index >= 32) {
-               val.vval = VCPU_VSX_VR(vcpu, index - 32);
+               val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                val.vsx32val[offset] = gpr32;
-               VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+               kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
        } else {
                dword_offset = offset / 2;
                word_offset = offset % 2;
@@ -1056,9 +1056,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
        if (offset == -1)
                return;
 
-       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vval = kvmppc_get_vsx_vr(vcpu, index);
        val.vsxval[offset] = gpr;
-       VCPU_VSX_VR(vcpu, index) = val.vval;
+       kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1072,9 +1072,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
        if (offset == -1)
                return;
 
-       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vval = kvmppc_get_vsx_vr(vcpu, index);
        val.vsx32val[offset] = gpr32;
-       VCPU_VSX_VR(vcpu, index) = val.vval;
+       kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1088,9 +1088,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
        if (offset == -1)
                return;
 
-       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vval = kvmppc_get_vsx_vr(vcpu, index);
        val.vsx16val[offset] = gpr16;
-       VCPU_VSX_VR(vcpu, index) = val.vval;
+       kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1104,9 +1104,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
        if (offset == -1)
                return;
 
-       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vval = kvmppc_get_vsx_vr(vcpu, index);
        val.vsx8val[offset] = gpr8;
-       VCPU_VSX_VR(vcpu, index) = val.vval;
+       kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 #endif /* CONFIG_ALTIVEC */
 
@@ -1419,7 +1419,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
                if (rs < 32) {
                        *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
                } else {
-                       reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+                       reg.vval = kvmppc_get_vsx_vr(vcpu, rs - 32);
                        *val = reg.vsxval[vsx_offset];
                }
                break;
@@ -1439,7 +1439,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
                        reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
                        *val = reg.vsx32val[word_offset];
                } else {
-                       reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+                       reg.vval = kvmppc_get_vsx_vr(vcpu, rs - 32);
                        *val = reg.vsx32val[vsx_offset];
                }
                break;
@@ -1554,7 +1554,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
        if (vmx_offset == -1)
                return -1;
 
-       reg.vval = VCPU_VSX_VR(vcpu, index);
+       reg.vval = kvmppc_get_vsx_vr(vcpu, index);
        *val = reg.vsxval[vmx_offset];
 
        return result;
@@ -1572,7 +1572,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
        if (vmx_offset == -1)
                return -1;
 
-       reg.vval = VCPU_VSX_VR(vcpu, index);
+       reg.vval = kvmppc_get_vsx_vr(vcpu, index);
        *val = reg.vsx32val[vmx_offset];
 
        return result;
@@ -1590,7 +1590,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
        if (vmx_offset == -1)
                return -1;
 
-       reg.vval = VCPU_VSX_VR(vcpu, index);
+       reg.vval = kvmppc_get_vsx_vr(vcpu, index);
        *val = reg.vsx16val[vmx_offset];
 
        return result;
@@ -1608,7 +1608,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
        if (vmx_offset == -1)
                return -1;
 
-       reg.vval = VCPU_VSX_VR(vcpu, index);
+       reg.vval = kvmppc_get_vsx_vr(vcpu, index);
        *val = reg.vsx8val[vmx_offset];
 
        return result;
@@ -1717,14 +1717,14 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+                       val.vval = kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0);
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+                       val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
                        break;
                case KVM_REG_PPC_VRSAVE:
                        val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
@@ -1768,14 +1768,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                       kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, val.vval);
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+                       kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
                        break;
                case KVM_REG_PPC_VRSAVE:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-- 
2.31.1

Reply via email to