Add nested virtualization CSRs to the KVM RISC-V ONE_REG interface
so that it can be updated from KVM user-space.

Signed-off-by: Anup Patel <[email protected]>
---
 arch/riscv/include/asm/kvm_vcpu_nested.h |  5 ++++
 arch/riscv/include/uapi/asm/kvm.h        | 27 +++++++++++++++++++++
 arch/riscv/kvm/vcpu_nested_csr.c         | 28 ++++++++++++++++++++++
 arch/riscv/kvm/vcpu_onereg.c             | 30 ++++++++++++++++++++++--
 4 files changed, 88 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_vcpu_nested.h b/arch/riscv/include/asm/kvm_vcpu_nested.h
index db6d89cf9771..9ae0e3795522 100644
--- a/arch/riscv/include/asm/kvm_vcpu_nested.h
+++ b/arch/riscv/include/asm/kvm_vcpu_nested.h
@@ -111,6 +111,11 @@ int kvm_riscv_vcpu_nested_hext_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_n
 
 void kvm_riscv_vcpu_nested_csr_reset(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_nested_set_csr(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                 unsigned long reg_val);
+int kvm_riscv_vcpu_nested_get_csr(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                 unsigned long *out_val);
+
 int kvm_riscv_vcpu_nested_swtlb_xlate(struct kvm_vcpu *vcpu,
                                      const struct kvm_cpu_trap *trap,
                                      struct kvm_gstage_mapping *out_map,
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index f62eaa47745b..a486d73e64ce 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -103,6 +103,30 @@ struct kvm_riscv_smstateen_csr {
        unsigned long sstateen0;
 };
 
+/* H-extension CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_hext_csr {
+       unsigned long hstatus;
+       unsigned long hedeleg;
+       unsigned long hideleg;
+       unsigned long hvip;
+       unsigned long hcounteren;
+       unsigned long htimedelta;
+       unsigned long htimedeltah;
+       unsigned long htval;
+       unsigned long htinst;
+       unsigned long henvcfg;
+       unsigned long henvcfgh;
+       unsigned long hgatp;
+       unsigned long vsstatus;
+       unsigned long vsie;
+       unsigned long vstvec;
+       unsigned long vsscratch;
+       unsigned long vsepc;
+       unsigned long vscause;
+       unsigned long vstval;
+       unsigned long vsatp;
+};
+
 /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
 struct kvm_riscv_timer {
        __u64 frequency;
@@ -264,12 +288,15 @@ struct kvm_riscv_sbi_fwft {
 #define KVM_REG_RISCV_CSR_GENERAL      (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_AIA          (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_SMSTATEEN    (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_HEXT         (0x3 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_REG(name)    \
                (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
 #define KVM_REG_RISCV_CSR_AIA_REG(name)        \
        (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
 #define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name)  \
        (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_HEXT_REG(name)  \
+       (offsetof(struct kvm_riscv_hext_csr, name) / sizeof(unsigned long))
 
 /* Timer registers are mapped as type 4 */
 #define KVM_REG_RISCV_TIMER            (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
diff --git a/arch/riscv/kvm/vcpu_nested_csr.c b/arch/riscv/kvm/vcpu_nested_csr.c
index 0e427f224954..887e84d15321 100644
--- a/arch/riscv/kvm/vcpu_nested_csr.c
+++ b/arch/riscv/kvm/vcpu_nested_csr.c
@@ -359,3 +359,31 @@ void kvm_riscv_vcpu_nested_csr_reset(struct kvm_vcpu *vcpu)
 
        memset(nsc, 0, sizeof(*nsc));
 }
+
+int kvm_riscv_vcpu_nested_set_csr(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                 unsigned long reg_val)
+{
+       struct kvm_vcpu_nested_csr *nsc = &vcpu->arch.nested.csr;
+
+       if (!riscv_isa_extension_available(vcpu->arch.isa, h))
+               return -ENOENT;
+       if (reg_num >= sizeof(struct kvm_riscv_hext_csr) / sizeof(unsigned long))
+               return -ENOENT;
+
+       ((unsigned long *)nsc)[reg_num] = reg_val;
+       return 0;
+}
+
+int kvm_riscv_vcpu_nested_get_csr(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                 unsigned long *out_val)
+{
+       struct kvm_vcpu_nested_csr *nsc = &vcpu->arch.nested.csr;
+
+       if (!riscv_isa_extension_available(vcpu->arch.isa, h))
+               return -ENOENT;
+       if (reg_num >= sizeof(struct kvm_riscv_hext_csr) / sizeof(unsigned long))
+               return -ENOENT;
+
+       *out_val = ((unsigned long *)nsc)[reg_num];
+       return 0;
+}
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 5f0d10beeb98..6bae3753b924 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -367,6 +367,9 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_CSR_SMSTATEEN:
                rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val);
                break;
+       case KVM_REG_RISCV_CSR_HEXT:
+               rc = kvm_riscv_vcpu_nested_get_csr(vcpu, reg_num, &reg_val);
+               break;
        default:
                rc = -ENOENT;
                break;
@@ -409,6 +412,9 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_CSR_SMSTATEEN:
                rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
                break;
+       case KVM_REG_RISCV_CSR_HEXT:
+               rc = kvm_riscv_vcpu_nested_set_csr(vcpu, reg_num, reg_val);
+               break;
        default:
                rc = -ENOENT;
                break;
@@ -664,6 +670,8 @@ static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
                n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
        if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
                n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
+       if (riscv_isa_extension_available(vcpu->arch.isa, h))
+               n += sizeof(struct kvm_riscv_hext_csr) / sizeof(unsigned long);
 
        return n;
 }
@@ -672,7 +680,7 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
 {
        int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
-       int n2 = 0, n3 = 0;
+       int n2 = 0, n3 = 0, n4 = 0;
 
        /* copy general csr regs */
        for (int i = 0; i < n1; i++) {
@@ -724,7 +732,25 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
                }
        }
 
-       return n1 + n2 + n3;
+       /* copy H-extension csr regs */
+       if (riscv_isa_extension_available(vcpu->arch.isa, h)) {
+               n4 = sizeof(struct kvm_riscv_hext_csr) / sizeof(unsigned long);
+
+               for (int i = 0; i < n4; i++) {
+                       u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                                  KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                       u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                         KVM_REG_RISCV_CSR_HEXT | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
+               }
+       }
+
+       return n1 + n2 + n3 + n4;
 }
 
 static inline unsigned long num_timer_regs(void)
-- 
2.43.0


Reply via email to