On Tue, Feb 26, 2019 at 04:32:30PM +0000, Julien Grall wrote:
> Hi Dave,
> 
> On 18/02/2019 19:52, Dave Martin wrote:
> >@@ -1091,6 +1088,95 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
> >  static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
> >  static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
> >+static unsigned int sve_restrictions(const struct kvm_vcpu *vcpu,
> >+                                 const struct sys_reg_desc *rd)
> >+{
> >+    return vcpu_has_sve(vcpu) ? 0 : REG_NO_USER | REG_NO_GUEST;
> >+}
> >+
> >+static unsigned int sve_id_restrictions(const struct kvm_vcpu *vcpu,
> >+                                    const struct sys_reg_desc *rd)
> >+{
> >+    return vcpu_has_sve(vcpu) ? 0 : REG_NO_USER;
> >+}
> >+
> >+static int get_zcr_el1(struct kvm_vcpu *vcpu,
> >+                   const struct sys_reg_desc *rd,
> >+                   const struct kvm_one_reg *reg, void __user *uaddr)
> >+{
> >+    if (WARN_ON(!vcpu_has_sve(vcpu)))
> >+            return -ENOENT;
> >+
> >+    return reg_to_user(uaddr, &vcpu->arch.ctxt.sys_regs[ZCR_EL1],
> >+                       reg->id);
> >+}
> >+
> >+static int set_zcr_el1(struct kvm_vcpu *vcpu,
> >+                   const struct sys_reg_desc *rd,
> >+                   const struct kvm_one_reg *reg, void __user *uaddr)
> >+{
> >+    if (WARN_ON(!vcpu_has_sve(vcpu)))
> >+            return -ENOENT;
> >+
> >+    return reg_from_user(&vcpu->arch.ctxt.sys_regs[ZCR_EL1], uaddr,
> >+                         reg->id);
> >+}
> >+
> >+/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
> >+static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
> >+{
> >+    if (!vcpu_has_sve(vcpu))
> >+            return 0;
> >+
> >+    return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
> >+}
> >+
> >+static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
> >+                               struct sys_reg_params *p,
> >+                               const struct sys_reg_desc *rd)
> >+{
> >+    if (p->is_write)
> >+            return write_to_read_only(vcpu, p, rd);
> >+
> >+    p->regval = guest_id_aa64zfr0_el1(vcpu);
> >+    return true;
> >+}
> >+
> >+static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
> >+            const struct sys_reg_desc *rd,
> >+            const struct kvm_one_reg *reg, void __user *uaddr)
> >+{
> >+    u64 val;
> >+
> >+    if (!vcpu_has_sve(vcpu))
> >+            return -ENOENT;
> >+
> >+    val = guest_id_aa64zfr0_el1(vcpu);
> >+    return reg_to_user(uaddr, &val, reg->id);
> >+}
> >+
> >+static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
> >+            const struct sys_reg_desc *rd,
> >+            const struct kvm_one_reg *reg, void __user *uaddr)
> >+{
> >+    const u64 id = sys_reg_to_index(rd);
> >+    int err;
> >+    u64 val;
> >+
> >+    if (!vcpu_has_sve(vcpu))
> >+            return -ENOENT;
> >+
> >+    err = reg_from_user(&val, uaddr, id);
> >+    if (err)
> >+            return err;
> >+
> >+    /* This is what we mean by invariant: you can't change it. */
> >+    if (val != guest_id_aa64zfr0_el1(vcpu))
> >+            return -EINVAL;
> >+
> >+    return 0;
> >+}
> 
> We seem to already have code for handling invariant registers as well as
> reading ID registers. I guess the only reason you can't use it is because
> of the check that the vcpu is using SVE.
> 
> However, AFAICT the restrictions callback would prevent you from entering
> the {get,set}_id accessors if the vCPU does not support SVE. So the check
> should not be reachable.

Hmmm, those checks were inherited from before this refactoring.

You're right: the checks are now done in a common place, so the checks in
the actual accessors should be redundant.
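
For reference, a minimal sketch of what that common place might look like,
assuming a restrictions() hook on struct sys_reg_desc (the helper name
here is illustrative, not necessarily what the series ends up calling it):

static unsigned int sys_reg_restrictions(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *rd)
{
	return rd->restrictions ? rd->restrictions(vcpu, rd) : 0;
}

	/*
	 * In the common uaccess path, before dispatching to
	 * rd->get_user() or rd->set_user():
	 */
	if (sys_reg_restrictions(vcpu, rd) & REG_NO_USER)
		return -ENOENT;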

I could demote them to WARN(), but it may make sense simply to delete
them.
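
Concretely, get_zcr_el1() with the check demoted would look like this
(a sketch based on the code quoted above):

static int get_zcr_el1(struct kvm_vcpu *vcpu,
		       const struct sys_reg_desc *rd,
		       const struct kvm_one_reg *reg, void __user *uaddr)
{
	/* Unreachable for non-SVE vcpus thanks to sve_restrictions() */
	WARN_ON(!vcpu_has_sve(vcpu));

	return reg_to_user(uaddr, &vcpu->arch.ctxt.sys_regs[ZCR_EL1],
			   reg->id);
}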

access_id_aa64zfr0_el1() should still be reachable, though, since we
don't set REG_NO_GUEST for this register.
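
i.e., the guest trap path would filter on REG_NO_GUEST only, something
like the following sketch (whether the series injects UNDEF at exactly
this point is my assumption here):

	/* In the trap handler, before calling r->access() */
	if (sys_reg_restrictions(vcpu, r) & REG_NO_GUEST) {
		kvm_inject_undefined(vcpu);
		return false;
	}

Since sve_id_restrictions() never sets REG_NO_GUEST, a guest read of
ID_AA64ZFR0_EL1 still reaches access_id_aa64zfr0_el1() and gets the
emulated value (0 for non-SVE vcpus).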

Does that make sense?

Cheers
---Dave