Re: [PATCH v2 2/2] KVM: arm/arm64: Rename vgic_attr_regs_access to vgic_attr_regs_access_v2

2016-08-16 Thread Auger Eric
Hi,

On 16/08/2016 19:35, Christoffer Dall wrote:
> Just a rename so we can implement a v3-specific function later.
> 
> We take the chance to get rid of the V2/V3 ops comments as well.
> 
> No functional change.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 26 +++---
>  1 file changed, 11 insertions(+), 15 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 19fa331..163b057 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -294,17 +294,17 @@ static bool lock_all_vcpus(struct kvm *kvm)
>   return true;
>  }
>  
> -/** vgic_attr_regs_access: allows user space to read/write VGIC registers
> - *
> - * @dev: kvm device handle
> - * @attr: kvm device attribute
> - * @reg: address the value is read or written
> - * @is_write: write flag
> +/**
> + * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
>   *
> + * @dev:  kvm device handle
> + * @attr: kvm device attribute
> + * @reg:  address the value is read or written
> + * @is_write: true if userspace is writing a register
>   */
> -static int vgic_attr_regs_access(struct kvm_device *dev,
> -  struct kvm_device_attr *attr,
> -  u32 *reg, bool is_write)
> +static int vgic_attr_regs_access_v2(struct kvm_device *dev,
> + struct kvm_device_attr *attr,
> + u32 *reg, bool is_write)
>  {
>   struct vgic_reg_attr reg_attr;
>   gpa_t addr;
> @@ -347,8 +347,6 @@ out:
>   return ret;
>  }
>  
> -/* V2 ops */
> -
>  static int vgic_v2_set_attr(struct kvm_device *dev,
>   struct kvm_device_attr *attr)
>  {
> @@ -367,7 +365,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
>   if (get_user(reg, uaddr))
>   return -EFAULT;
>  
> - return vgic_attr_regs_access(dev, attr, ®, true);
> + return vgic_attr_regs_access_v2(dev, attr, ®, true);
>   }
>   }
>  
> @@ -389,7 +387,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
>   u32 __user *uaddr = (u32 __user *)(long)attr->addr;
>   u32 reg = 0;
>  
> - ret = vgic_attr_regs_access(dev, attr, ®, false);
> + ret = vgic_attr_regs_access_v2(dev, attr, ®, false);
>   if (ret)
>   return ret;
>   return put_user(reg, uaddr);
> @@ -433,8 +431,6 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
>   .has_attr = vgic_v2_has_attr,
>  };
>  
> -/* V3 ops */
> -
>  #ifdef CONFIG_KVM_ARM_VGIC_V3
>  
>  static int vgic_v3_set_attr(struct kvm_device *dev,
> 
Reviewed-by: Eric Auger 

Thanks

Eric
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v2 1/2] KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

2016-08-16 Thread Auger Eric
Hi Christoffer,

On 16/08/2016 19:35, Christoffer Dall wrote:
> As we are about to deal with multiple data types and situations where
> the vgic should not be initialized when doing userspace accesses on the
> register attributes, factor out the functionality of
> vgic_attr_regs_access into smaller bits which can be reused by a new
> function later.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 100 
> ++--
>  1 file changed, 73 insertions(+), 27 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 1813f93..19fa331 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -233,6 +233,67 @@ int kvm_register_vgic_device(unsigned long type)
>   return ret;
>  }
>  
> +struct vgic_reg_attr {
> + struct kvm_vcpu *vcpu;
> + gpa_t addr;
> +};
> +
> +static int parse_vgic_v2_attr(struct kvm_device *dev,
> +   struct kvm_device_attr *attr,
> +   struct vgic_reg_attr *reg_attr)
> +{
> + int cpuid;
> +
> + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> +  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> +
> + if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
> + return -EINVAL;
> +
> + reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> + reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> +
> + return 0;
> +}
> +
> +/* unlocks vcpus from @vcpu_lock_idx and smaller */
> +static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
> +{
> + struct kvm_vcpu *tmp_vcpu;
> +
> + for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
> + tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
> + mutex_unlock(&tmp_vcpu->mutex);
> + }
> +}
> +
> +static void unlock_all_vcpus(struct kvm *kvm)
> +{
> + unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
> +}
> +
> +/* Returns true if all vcpus were locked, false otherwise */
> +static bool lock_all_vcpus(struct kvm *kvm)
> +{
> + struct kvm_vcpu *tmp_vcpu;
> + int c;
> +
> + /*
> +  * Any time a vcpu is run, vcpu_load is called which tries to grab the
> +  * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
> +  * that no other VCPUs are run and fiddle with the vgic state while we
> +  * access it.
> +  */
> + kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
> + if (!mutex_trylock(&tmp_vcpu->mutex)) {
> + unlock_vcpus(kvm, c - 1);
> + return false;
> + }
> + }
> +
> + return true;
> +}
> +
>  /** vgic_attr_regs_access: allows user space to read/write VGIC registers
>   *
>   * @dev: kvm device handle
> @@ -245,15 +306,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>struct kvm_device_attr *attr,
>u32 *reg, bool is_write)
>  {
> + struct vgic_reg_attr reg_attr;
>   gpa_t addr;
> - int cpuid, ret, c;
> - struct kvm_vcpu *vcpu, *tmp_vcpu;
> - int vcpu_lock_idx = -1;
> + struct kvm_vcpu *vcpu;
> + int ret;
>  
> - cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> -  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> - vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> - addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> + ret = parse_vgic_v2_attr(dev, attr, ®_attr);
> + if (ret)
> + return ret;
> +
> + vcpu = reg_attr.vcpu;
> + addr = reg_attr.addr;
>  
>   mutex_lock(&dev->kvm->lock);
>  
> @@ -261,24 +324,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   if (ret)
>   goto out;
>  
> - if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
> - ret = -EINVAL;
> + if (!lock_all_vcpus(dev->kvm)) {
> + ret = -EBUSY;
>   goto out;
>   }
>  
> - /*
> -  * Any time a vcpu is run, vcpu_load is called which tries to grab the
> -  * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
> -  * that no other VCPUs are run and fiddle with the vgic state while we
> -  * access it.
> -  */
> - ret = -EBUSY;
> - kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
> - if (!mutex_trylock(&tmp_vcpu->mutex))
> - goto out;
> - vcpu_lock_idx = c;
> - }
> -
>   switch (attr->group) {
>   case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
>   ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
> @@ -291,12 +341,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   break;
>   }
>  
> + unlock_all_vcpus(dev->kvm);
>  out:
> - for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
> - tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
> - mutex_unlock(&tmp_vcpu->mutex);
> - }
> -
>   mutex_unlock(&dev->kvm->lock);
>   return ret

Re: [PATCH] KVM: arm64: ITS: avoid re-mapping LPIs

2016-08-16 Thread André Przywara
On 16/08/16 18:30, Christoffer Dall wrote:
> On Tue, Aug 16, 2016 at 05:51:06PM +0100, Andre Przywara wrote:
>> When a guest wants to map a device-ID/event-ID combination that is
>> already mapped, we may end up in a situation where an LPI is never
>> "put", thus never being freed.
>> Since the GICv3 spec says that mapping an already mapped LPI is
>> UNPREDICTABLE, let's just bail out early in this situation to avoid
>> any potential leaks.
>>
>> Signed-off-by: Andre Przywara 
>> ---
>>  virt/kvm/arm/vgic/vgic-its.c | 27 +--
>>  1 file changed, 13 insertions(+), 14 deletions(-)
>>
>> diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
>> index 9533080..4660a7d 100644
>> --- a/virt/kvm/arm/vgic/vgic-its.c
>> +++ b/virt/kvm/arm/vgic/vgic-its.c
>> @@ -731,7 +731,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
>> struct vgic_its *its,
>>  u32 device_id = its_cmd_get_deviceid(its_cmd);
>>  u32 event_id = its_cmd_get_id(its_cmd);
>>  u32 coll_id = its_cmd_get_collection(its_cmd);
>> -struct its_itte *itte, *new_itte = NULL;
>> +struct its_itte *itte;
>>  struct its_device *device;
>>  struct its_collection *collection, *new_coll = NULL;
>>  int lpi_nr;
>> @@ -749,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
>> struct vgic_its *its,
>>  lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
>>  return E_ITS_MAPTI_PHYSICALID_OOR;
>>  
>> +/* If there is an existing mapping, behavior is UNPREDICTABLE. */
>> +if (find_itte(its, device_id, event_id))
>> +return 0;
>> +
> 
> By the way, this made me think how these errors are handled, and unless
> I'm mistaken, the return value from vgic_its_handle_command() is simply
> discarded, so even when we return things like -ENOMEM, this is just
> ignored?  Is this really the intention?

Yes, at least at the moment. The spec does not specify how ITS errors
should be communicated (IMPLEMENTATION DEFINED), only that an error
condition itself can be signaled via an SError - for which, at the moment, we lack
any code to inject, if I am not mistaken.
Still I wanted to assign those error codes: IMHO it improves readability
and simplifies any later extension in that respect.

For the Linux errors (like -ENOMEM): Due to the asynchronous nature of
the ITS command handling and also the guest triggering the commands,
there is really no better way to report those OoM conditions, for
instance, so I treated them the same as "proper" ITS errors.

Hope that helps.
Cheers,
Andre.

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2] kvm: arm: Enforce some NS-SVC initialisation

2016-08-16 Thread Robin Murphy
Since the non-secure copies of banked registers lack architecturally
defined reset values, there is no actual guarantee when entering in Hyp
from secure-only firmware that the Non-Secure PL1 state will look the
way that kernel entry (in particular the decompressor stub) expects.
So far, we've been getting away with it thanks to implementation details
of ARMv7 cores and/or bootloader behaviour, but for the sake of forwards
compatibility let's try to ensure that we have a minimally sane state
before dropping down into it.

Signed-off-by: Robin Murphy 
---

v2: Initialise SED/ITD to safe values as well.

 arch/arm/kernel/hyp-stub.S | 13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 0b1e4a93d67e..15d073ae5da2 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -142,6 +142,19 @@ ARM_BE8(orrr7, r7, #(1 << 25)) @ HSCTLR.EE
and r7, #0x1f   @ Preserve HPMN
mcr p15, 4, r7, c1, c1, 1   @ HDCR
 
+   @ Make sure NS-SVC is initialised appropriately
+   mrc p15, 0, r7, c1, c0, 0   @ SCTLR
+   orr r7, #(1 << 5)   @ CP15 barriers enabled
+   bic r7, #(3 << 7)   @ Clear SED/ITD for v8 (RES0 for v7)
+   bic r7, #(3 << 19)  @ WXN and UWXN disabled
+   mcr p15, 0, r7, c1, c0, 0   @ SCTLR
+
+   mrc p15, 0, r7, c0, c0, 0   @ MIDR
+   mcr p15, 4, r7, c0, c0, 0   @ VPIDR
+
+   mrc p15, 0, r7, c0, c0, 5   @ MPIDR
+   mcr p15, 4, r7, c0, c0, 5   @ VMPIDR
+
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1   @ ID_PFR1
-- 
2.8.1.dirty

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH v2 0/4] arm/arm64: vgic-new: Implement API for vGICv3 live migration

2016-08-16 Thread Christoffer Dall
Hi Vijaya,

On Tue, Aug 09, 2016 at 04:28:42PM +0530, vijay.kil...@gmail.com wrote:
> From: Vijaya Kumar K 
> 
> This patchset adds API for saving and restoring
> of VGICv3 registers to support live migration with new vgic feature.
> This API definition is as per version of VGICv3 specification
> http://lists.infradead.org/pipermail/linux-arm-kernel/2016-July/445611.html
> 
> To test live migration with QEMU, use below patch series
> https://lists.gnu.org/archive/html/qemu-devel/2016-08/msg01444.html
> 
> The patch 3 & 4 are picked from the Pavel's previous implementation.
> http://www.spinics.net/lists/kvm/msg122040.html
> 
> v1 => v2:
>  - The init sequence change patch is no more required.
>Fixed in patch 2 by using static vgic_io_dev regions structure instead
>of using dynamic allocation pointer.
>  - Updated commit message of patch 4.
>  - Dropped usage of union to manage 32-bit and 64-bit access in patch 1.
>Used local variable for 32-bit access.
>  - Updated macro __ARM64_SYS_REG and ARM64_SYS_REG in 
>arch/arm64/include/uapi/asm/kvm.h as per qemu requirements.
> 
I think you should have enough to go on by now for a new revision.  You
can include the two patches I just sent with the series '[PATCH v2]
Rework vgic_attr_regs_access' in the beginning of your series and then
base your work on that, assuming nobody else screams about this.

If you use that approach, and you address the missing soft pending state
etc. that Peter pointed out, then we should be ready for a detailed
review.

Looking forward to the next version.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 2/2] KVM: arm/arm64: Rename vgic_attr_regs_access to vgic_attr_regs_access_v2

2016-08-16 Thread Christoffer Dall
On Tue, Aug 16, 2016 at 06:09:24PM +0200, Andrew Jones wrote:
> On Tue, Aug 16, 2016 at 05:10:34PM +0200, Christoffer Dall wrote:
> > Just a rename so we can implement a v3-specific function later.  No
> > functional change.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  virt/kvm/arm/vgic/vgic-kvm-device.c | 27 ++-
> >  1 file changed, 14 insertions(+), 13 deletions(-)
> > 
> > diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> > b/virt/kvm/arm/vgic/vgic-kvm-device.c
> > index 22d7ab3..2e18f03 100644
> > --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> > +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> > @@ -296,17 +296,20 @@ static bool lock_all_vcpus(struct kvm *kvm)
> > return true;
> >  }
> >  
> > -/** vgic_attr_regs_access: allows user space to read/write VGIC registers
> > - *
> > - * @dev: kvm device handle
> > - * @attr: kvm device attribute
> > - * @reg: address the value is read or written
> > - * @is_write: write flag
> > +/* V2 ops */
> > +
> > +
> 
> Do we want 2 blank lines here? Do we need the 'V2 ops' comment at all?
> 
> > +/**
> > + * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
> >   *
> > + * @dev:  kvm device handle
> > + * @attr: kvm device attribute
> > + * @reg:  address the value is read or written
> > + * @is_write: true if userspace is writing a register
> >   */
> > -static int vgic_attr_regs_access(struct kvm_device *dev,
> > -struct kvm_device_attr *attr,
> > -u32 *reg, bool is_write)
> > +static int vgic_attr_regs_access_v2(struct kvm_device *dev,
> > +   struct kvm_device_attr *attr,
> > +   u32 *reg, bool is_write)
> >  {
> > struct vgic_reg_attr reg_attr;
> > gpa_t addr;
> > @@ -349,8 +352,6 @@ out:
> > return ret;
> >  }
> >  
> > -/* V2 ops */
> > -
> >  static int vgic_v2_set_attr(struct kvm_device *dev,
> > struct kvm_device_attr *attr)
> >  {
> > @@ -369,7 +370,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
> > if (get_user(reg, uaddr))
> > return -EFAULT;
> >  
> > -   return vgic_attr_regs_access(dev, attr, ®, true);
> > +   return vgic_attr_regs_access_v2(dev, attr, ®, true);
> > }
> > }
> >  
> > @@ -391,7 +392,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
> > u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> > u32 reg = 0;
> >  
> > -   ret = vgic_attr_regs_access(dev, attr, ®, false);
> > +   ret = vgic_attr_regs_access_v2(dev, attr, ®, false);
> > if (ret)
> > return ret;
> > return put_user(reg, uaddr);
> > -- 
> > 2.9.0
> >
> 
> No better way to attract reviewers than to advertise no functional
> change :-)
> 
> Looks good to me.
> 
Thanks for the quick review!

v2 has been sent, hopefully Vijay can pick these up from here and
include in his series.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 1/2] KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

2016-08-16 Thread Christoffer Dall
As we are about to deal with multiple data types and situations where
the vgic should not be initialized when doing userspace accesses on the
register attributes, factor out the functionality of
vgic_attr_regs_access into smaller bits which can be reused by a new
function later.

Signed-off-by: Christoffer Dall 
---
 virt/kvm/arm/vgic/vgic-kvm-device.c | 100 ++--
 1 file changed, 73 insertions(+), 27 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93..19fa331 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -233,6 +233,67 @@ int kvm_register_vgic_device(unsigned long type)
return ret;
 }
 
+struct vgic_reg_attr {
+   struct kvm_vcpu *vcpu;
+   gpa_t addr;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ struct vgic_reg_attr *reg_attr)
+{
+   int cpuid;
+
+   cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+   if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+   return -EINVAL;
+
+   reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+   reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+   return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+   struct kvm_vcpu *tmp_vcpu;
+
+   for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+   tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+   mutex_unlock(&tmp_vcpu->mutex);
+   }
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+   unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+   struct kvm_vcpu *tmp_vcpu;
+   int c;
+
+   /*
+* Any time a vcpu is run, vcpu_load is called which tries to grab the
+* vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+* that no other VCPUs are run and fiddle with the vgic state while we
+* access it.
+*/
+   kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+   if (!mutex_trylock(&tmp_vcpu->mutex)) {
+   unlock_vcpus(kvm, c - 1);
+   return false;
+   }
+   }
+
+   return true;
+}
+
 /** vgic_attr_regs_access: allows user space to read/write VGIC registers
  *
  * @dev: kvm device handle
@@ -245,15 +306,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 struct kvm_device_attr *attr,
 u32 *reg, bool is_write)
 {
+   struct vgic_reg_attr reg_attr;
gpa_t addr;
-   int cpuid, ret, c;
-   struct kvm_vcpu *vcpu, *tmp_vcpu;
-   int vcpu_lock_idx = -1;
+   struct kvm_vcpu *vcpu;
+   int ret;
 
-   cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-   vcpu = kvm_get_vcpu(dev->kvm, cpuid);
-   addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+   ret = parse_vgic_v2_attr(dev, attr, ®_attr);
+   if (ret)
+   return ret;
+
+   vcpu = reg_attr.vcpu;
+   addr = reg_attr.addr;
 
mutex_lock(&dev->kvm->lock);
 
@@ -261,24 +324,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
if (ret)
goto out;
 
-   if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
-   ret = -EINVAL;
+   if (!lock_all_vcpus(dev->kvm)) {
+   ret = -EBUSY;
goto out;
}
 
-   /*
-* Any time a vcpu is run, vcpu_load is called which tries to grab the
-* vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-* that no other VCPUs are run and fiddle with the vgic state while we
-* access it.
-*/
-   ret = -EBUSY;
-   kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-   if (!mutex_trylock(&tmp_vcpu->mutex))
-   goto out;
-   vcpu_lock_idx = c;
-   }
-
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
@@ -291,12 +341,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
break;
}
 
+   unlock_all_vcpus(dev->kvm);
 out:
-   for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-   tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
-   mutex_unlock(&tmp_vcpu->mutex);
-   }
-
mutex_unlock(&dev->kvm->lock);
return ret;
 }
-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 2/2] KVM: arm/arm64: Rename vgic_attr_regs_access to vgic_attr_regs_access_v2

2016-08-16 Thread Christoffer Dall
Just a rename so we can implement a v3-specific function later.

We take the chance to get rid of the V2/V3 ops comments as well.

No functional change.

Signed-off-by: Christoffer Dall 
---
 virt/kvm/arm/vgic/vgic-kvm-device.c | 26 +++---
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 19fa331..163b057 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -294,17 +294,17 @@ static bool lock_all_vcpus(struct kvm *kvm)
return true;
 }
 
-/** vgic_attr_regs_access: allows user space to read/write VGIC registers
- *
- * @dev: kvm device handle
- * @attr: kvm device attribute
- * @reg: address the value is read or written
- * @is_write: write flag
+/**
+ * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
  *
+ * @dev:  kvm device handle
+ * @attr: kvm device attribute
+ * @reg:  address the value is read or written
+ * @is_write: true if userspace is writing a register
  */
-static int vgic_attr_regs_access(struct kvm_device *dev,
-struct kvm_device_attr *attr,
-u32 *reg, bool is_write)
+static int vgic_attr_regs_access_v2(struct kvm_device *dev,
+   struct kvm_device_attr *attr,
+   u32 *reg, bool is_write)
 {
struct vgic_reg_attr reg_attr;
gpa_t addr;
@@ -347,8 +347,6 @@ out:
return ret;
 }
 
-/* V2 ops */
-
 static int vgic_v2_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
 {
@@ -367,7 +365,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
if (get_user(reg, uaddr))
return -EFAULT;
 
-   return vgic_attr_regs_access(dev, attr, ®, true);
+   return vgic_attr_regs_access_v2(dev, attr, ®, true);
}
}
 
@@ -389,7 +387,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg = 0;
 
-   ret = vgic_attr_regs_access(dev, attr, ®, false);
+   ret = vgic_attr_regs_access_v2(dev, attr, ®, false);
if (ret)
return ret;
return put_user(reg, uaddr);
@@ -433,8 +431,6 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
.has_attr = vgic_v2_has_attr,
 };
 
-/* V3 ops */
-
 #ifdef CONFIG_KVM_ARM_VGIC_V3
 
 static int vgic_v3_set_attr(struct kvm_device *dev,
-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 0/2] Rework vgic_attr_regs_access

2016-08-16 Thread Christoffer Dall
Two small patches to split up the functionality in vgic_attr_regs_access to
make life simpler when having to deal with GICv3 save/restore.

Changes from v1:
 - Comments and white space

Christoffer Dall (2):
  KVM: arm/arm64: Factor out vgic_attr_regs_access functionality
  KVM: arm/arm64: Rename vgic_attr_regs_access to
vgic_attr_regs_access_v2

 virt/kvm/arm/vgic/vgic-kvm-device.c | 122 
 1 file changed, 82 insertions(+), 40 deletions(-)

-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] KVM: arm64: ITS: avoid re-mapping LPIs

2016-08-16 Thread Christoffer Dall
On Tue, Aug 16, 2016 at 05:51:06PM +0100, Andre Przywara wrote:
> When a guest wants to map a device-ID/event-ID combination that is
> already mapped, we may end up in a situation where an LPI is never
> "put", thus never being freed.
> Since the GICv3 spec says that mapping an already mapped LPI is
> UNPREDICTABLE, let's just bail out early in this situation to avoid
> any potential leaks.
> 
> Signed-off-by: Andre Przywara 
> ---
>  virt/kvm/arm/vgic/vgic-its.c | 27 +--
>  1 file changed, 13 insertions(+), 14 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
> index 9533080..4660a7d 100644
> --- a/virt/kvm/arm/vgic/vgic-its.c
> +++ b/virt/kvm/arm/vgic/vgic-its.c
> @@ -731,7 +731,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   u32 device_id = its_cmd_get_deviceid(its_cmd);
>   u32 event_id = its_cmd_get_id(its_cmd);
>   u32 coll_id = its_cmd_get_collection(its_cmd);
> - struct its_itte *itte, *new_itte = NULL;
> + struct its_itte *itte;
>   struct its_device *device;
>   struct its_collection *collection, *new_coll = NULL;
>   int lpi_nr;
> @@ -749,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
>   return E_ITS_MAPTI_PHYSICALID_OOR;
>  
> + /* If there is an existing mapping, behavior is UNPREDICTABLE. */
> + if (find_itte(its, device_id, event_id))
> + return 0;
> +

By the way, this made me think how these errors are handled, and unless
I'm mistaken, the return value from vgic_its_handle_command() is simply
discarded, so even when we return things like -ENOMEM, this is just
ignored?  Is this really the intention?

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] KVM: arm64: ITS: avoid re-mapping LPIs

2016-08-16 Thread Christoffer Dall
On Tue, Aug 16, 2016 at 05:51:06PM +0100, Andre Przywara wrote:
> When a guest wants to map a device-ID/event-ID combination that is
> already mapped, we may end up in a situation where an LPI is never
> "put", thus never being freed.
> Since the GICv3 spec says that mapping an already mapped LPI is
> UNPREDICTABLE, let's just bail out early in this situation to avoid
> any potential leaks.
> 
> Signed-off-by: Andre Przywara 

Reviewed-by: Christoffer Dall 

Thanks!
-Christoffer

> ---
>  virt/kvm/arm/vgic/vgic-its.c | 27 +--
>  1 file changed, 13 insertions(+), 14 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
> index 9533080..4660a7d 100644
> --- a/virt/kvm/arm/vgic/vgic-its.c
> +++ b/virt/kvm/arm/vgic/vgic-its.c
> @@ -731,7 +731,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   u32 device_id = its_cmd_get_deviceid(its_cmd);
>   u32 event_id = its_cmd_get_id(its_cmd);
>   u32 coll_id = its_cmd_get_collection(its_cmd);
> - struct its_itte *itte, *new_itte = NULL;
> + struct its_itte *itte;
>   struct its_device *device;
>   struct its_collection *collection, *new_coll = NULL;
>   int lpi_nr;
> @@ -749,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
>   return E_ITS_MAPTI_PHYSICALID_OOR;
>  
> + /* If there is an existing mapping, behavior is UNPREDICTABLE. */
> + if (find_itte(its, device_id, event_id))
> + return 0;
> +
>   collection = find_collection(its, coll_id);
>   if (!collection) {
>   int ret = vgic_its_alloc_collection(its, &collection, coll_id);
> @@ -757,20 +761,16 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   new_coll = collection;
>   }
>  
> - itte = find_itte(its, device_id, event_id);
> + itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
>   if (!itte) {
> - itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
> - if (!itte) {
> - if (new_coll)
> - vgic_its_free_collection(its, coll_id);
> - return -ENOMEM;
> - }
> -
> - new_itte = itte;
> - itte->event_id  = event_id;
> - list_add_tail(&itte->itte_list, &device->itt_head);
> + if (new_coll)
> + vgic_its_free_collection(its, coll_id);
> + return -ENOMEM;
>   }
>  
> + itte->event_id  = event_id;
> + list_add_tail(&itte->itte_list, &device->itt_head);
> +
>   itte->collection = collection;
>   itte->lpi = lpi_nr;
>  
> @@ -778,8 +778,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
> struct vgic_its *its,
>   if (IS_ERR(irq)) {
>   if (new_coll)
>   vgic_its_free_collection(its, coll_id);
> - if (new_itte)
> - its_free_itte(kvm, new_itte);
> + its_free_itte(kvm, itte);
>   return PTR_ERR(irq);
>   }
>   itte->irq = irq;
> -- 
> 2.9.0
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH] KVM: arm64: ITS: avoid re-mapping LPIs

2016-08-16 Thread Andre Przywara
When a guest wants to map a device-ID/event-ID combination that is
already mapped, we may end up in a situation where an LPI is never
"put", thus never being freed.
Since the GICv3 spec says that mapping an already mapped LPI is
UNPREDICTABLE, let's just bail out early in this situation to avoid
any potential leaks.

Signed-off-by: Andre Przywara 
---
 virt/kvm/arm/vgic/vgic-its.c | 27 +--
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 9533080..4660a7d 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -731,7 +731,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct 
vgic_its *its,
u32 device_id = its_cmd_get_deviceid(its_cmd);
u32 event_id = its_cmd_get_id(its_cmd);
u32 coll_id = its_cmd_get_collection(its_cmd);
-   struct its_itte *itte, *new_itte = NULL;
+   struct its_itte *itte;
struct its_device *device;
struct its_collection *collection, *new_coll = NULL;
int lpi_nr;
@@ -749,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
struct vgic_its *its,
lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
return E_ITS_MAPTI_PHYSICALID_OOR;
 
+   /* If there is an existing mapping, behavior is UNPREDICTABLE. */
+   if (find_itte(its, device_id, event_id))
+   return 0;
+
collection = find_collection(its, coll_id);
if (!collection) {
int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -757,20 +761,16 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, 
struct vgic_its *its,
new_coll = collection;
}
 
-   itte = find_itte(its, device_id, event_id);
+   itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
if (!itte) {
-   itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-   if (!itte) {
-   if (new_coll)
-   vgic_its_free_collection(its, coll_id);
-   return -ENOMEM;
-   }
-
-   new_itte = itte;
-   itte->event_id  = event_id;
-   list_add_tail(&itte->itte_list, &device->itt_head);
+   if (new_coll)
+   vgic_its_free_collection(its, coll_id);
+   return -ENOMEM;
}
 
+   itte->event_id  = event_id;
+   list_add_tail(&itte->itte_list, &device->itt_head);
+
itte->collection = collection;
itte->lpi = lpi_nr;
 
@@ -778,8 +778,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct 
vgic_its *its,
if (IS_ERR(irq)) {
if (new_coll)
vgic_its_free_collection(its, coll_id);
-   if (new_itte)
-   its_free_itte(kvm, new_itte);
+   its_free_itte(kvm, itte);
return PTR_ERR(irq);
}
itte->irq = irq;
-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 2/2] KVM: arm/arm64: Rename vgic_attr_regs_access to vgic_attr_regs_access_v2

2016-08-16 Thread Andrew Jones
On Tue, Aug 16, 2016 at 05:10:34PM +0200, Christoffer Dall wrote:
> Just a rename so we can implement a v3-specific function later.  No
> functional change.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 27 ++-
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 22d7ab3..2e18f03 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -296,17 +296,20 @@ static bool lock_all_vcpus(struct kvm *kvm)
>   return true;
>  }
>  
> -/** vgic_attr_regs_access: allows user space to read/write VGIC registers
> - *
> - * @dev: kvm device handle
> - * @attr: kvm device attribute
> - * @reg: address the value is read or written
> - * @is_write: write flag
> +/* V2 ops */
> +
> +

Do we want 2 blank lines here? Do we need the 'V2 ops' comment at all?

> +/**
> + * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
>   *
> + * @dev:  kvm device handle
> + * @attr: kvm device attribute
> + * @reg:  address the value is read or written
> + * @is_write: true if userspace is writing a register
>   */
> -static int vgic_attr_regs_access(struct kvm_device *dev,
> -  struct kvm_device_attr *attr,
> -  u32 *reg, bool is_write)
> +static int vgic_attr_regs_access_v2(struct kvm_device *dev,
> + struct kvm_device_attr *attr,
> + u32 *reg, bool is_write)
>  {
>   struct vgic_reg_attr reg_attr;
>   gpa_t addr;
> @@ -349,8 +352,6 @@ out:
>   return ret;
>  }
>  
> -/* V2 ops */
> -
>  static int vgic_v2_set_attr(struct kvm_device *dev,
>   struct kvm_device_attr *attr)
>  {
> @@ -369,7 +370,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
>   if (get_user(reg, uaddr))
>   return -EFAULT;
>  
> - return vgic_attr_regs_access(dev, attr, &reg, true);
> + return vgic_attr_regs_access_v2(dev, attr, &reg, true);
>   }
>   }
>  
> @@ -391,7 +392,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
>   u32 __user *uaddr = (u32 __user *)(long)attr->addr;
>   u32 reg = 0;
>  
> - ret = vgic_attr_regs_access(dev, attr, &reg, false);
> + ret = vgic_attr_regs_access_v2(dev, attr, &reg, false);
>   if (ret)
>   return ret;
>   return put_user(reg, uaddr);
> -- 
> 2.9.0
>

No better way to attract reviewers than to advertise no functional
change :-)

Looks good to me.

drew
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 1/2] KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

2016-08-16 Thread Andrew Jones
On Tue, Aug 16, 2016 at 05:10:33PM +0200, Christoffer Dall wrote:
> As we are about to deal with multiple data types and situations where
> the vgic should not be initialized when doing userspace accesses on the
> register attributes, factor out the functionality of
> vgic_attr_regs_access into smaller bits which can be reused by a new
> function later.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 102 
> ++--
>  1 file changed, 75 insertions(+), 27 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 1813f93..22d7ab3 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -233,6 +233,69 @@ int kvm_register_vgic_device(unsigned long type)
>   return ret;
>  }
>  
> +struct vgic_reg_attr {
> + struct kvm_vcpu *vcpu;
> + gpa_t addr;
> +};
> +
> +static int parse_vgic_v2_attr(struct kvm_device *dev,
> +   struct kvm_device_attr *attr,
> +   struct vgic_reg_attr *reg_attr)
> +{
> + int cpuid;
> +
> + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> +  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> +
> + if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
> + return -EINVAL;
> +
> + reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> + reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> +
> + return 0;
> +}
> +
> +/* unlocks vcpus from @vcpu_lock_idx and smaller */
> +static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
> +{
> + struct kvm_vcpu *tmp_vcpu;
> +
> + for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
> + tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
> + mutex_unlock(&tmp_vcpu->mutex);
> + }
> +}
> +
> +/* unlocks vcpus from @vcpu_lock_idx and smaller */

incorrect comment here (copy+pasted from above)

> +static void unlock_all_vcpus(struct kvm *kvm)
> +{
> + unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
> +}
> +
> +
> +/* Returns true if all vcpus were locked, false otherwise */
> +static bool lock_all_vcpus(struct kvm *kvm)
> +{
> + struct kvm_vcpu *tmp_vcpu;
> + int c;
> +
> + /*
> +  * Any time a vcpu is run, vcpu_load is called which tries to grab the
> +  * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
> +  * that no other VCPUs are run and fiddle with the vgic state while we
> +  * access it.
> +  */
> + kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
> + if (!mutex_trylock(&tmp_vcpu->mutex)) {
> + unlock_vcpus(kvm, c - 1);
> + return false;
> + }
> + }
> +
> + return true;
> +}
> +
>  /** vgic_attr_regs_access: allows user space to read/write VGIC registers
>   *
>   * @dev: kvm device handle
> @@ -245,15 +308,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>struct kvm_device_attr *attr,
>u32 *reg, bool is_write)
>  {
> + struct vgic_reg_attr reg_attr;
>   gpa_t addr;
> - int cpuid, ret, c;
> - struct kvm_vcpu *vcpu, *tmp_vcpu;
> - int vcpu_lock_idx = -1;
> + struct kvm_vcpu *vcpu;
> + int ret;
>  
> - cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> -  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> - vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> - addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
> + ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
> + if (ret)
> + return ret;
> +
> + vcpu = reg_attr.vcpu;
> + addr = reg_attr.addr;
>  
>   mutex_lock(&dev->kvm->lock);
>  
> @@ -261,24 +326,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   if (ret)
>   goto out;
>  
> - if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
> - ret = -EINVAL;
> + if (!lock_all_vcpus(dev->kvm)) {
> + ret = -EBUSY;
>   goto out;
>   }
>  
> - /*
> -  * Any time a vcpu is run, vcpu_load is called which tries to grab the
> -  * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
> -  * that no other VCPUs are run and fiddle with the vgic state while we
> -  * access it.
> -  */
> - ret = -EBUSY;
> - kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
> - if (!mutex_trylock(&tmp_vcpu->mutex))
> - goto out;
> - vcpu_lock_idx = c;
> - }
> -
>   switch (attr->group) {
>   case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
>   ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
> @@ -291,12 +343,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   break;
>   }
>  
> + unlock_all_vcpus(dev->kvm);
>  out:
> - for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
> - tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
> -   

[PATCH 2/2] KVM: arm/arm64: Rename vgic_attr_regs_access to vgic_attr_regs_access_v2

2016-08-16 Thread Christoffer Dall
Just a rename so we can implement a v3-specific function later.  No
functional change.

Signed-off-by: Christoffer Dall 
---
 virt/kvm/arm/vgic/vgic-kvm-device.c | 27 ++-
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 22d7ab3..2e18f03 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -296,17 +296,20 @@ static bool lock_all_vcpus(struct kvm *kvm)
return true;
 }
 
-/** vgic_attr_regs_access: allows user space to read/write VGIC registers
- *
- * @dev: kvm device handle
- * @attr: kvm device attribute
- * @reg: address the value is read or written
- * @is_write: write flag
+/* V2 ops */
+
+
+/**
+ * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
  *
+ * @dev:  kvm device handle
+ * @attr: kvm device attribute
+ * @reg:  address the value is read or written
+ * @is_write: true if userspace is writing a register
  */
-static int vgic_attr_regs_access(struct kvm_device *dev,
-struct kvm_device_attr *attr,
-u32 *reg, bool is_write)
+static int vgic_attr_regs_access_v2(struct kvm_device *dev,
+   struct kvm_device_attr *attr,
+   u32 *reg, bool is_write)
 {
struct vgic_reg_attr reg_attr;
gpa_t addr;
@@ -349,8 +352,6 @@ out:
return ret;
 }
 
-/* V2 ops */
-
 static int vgic_v2_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
 {
@@ -369,7 +370,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
if (get_user(reg, uaddr))
return -EFAULT;
 
-   return vgic_attr_regs_access(dev, attr, &reg, true);
+   return vgic_attr_regs_access_v2(dev, attr, &reg, true);
}
}
 
@@ -391,7 +392,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg = 0;
 
-   ret = vgic_attr_regs_access(dev, attr, &reg, false);
+   ret = vgic_attr_regs_access_v2(dev, attr, &reg, false);
if (ret)
return ret;
return put_user(reg, uaddr);
-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH 1/2] KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

2016-08-16 Thread Christoffer Dall
As we are about to deal with multiple data types and situations where
the vgic should not be initialized when doing userspace accesses on the
register attributes, factor out the functionality of
vgic_attr_regs_access into smaller bits which can be reused by a new
function later.

Signed-off-by: Christoffer Dall 
---
 virt/kvm/arm/vgic/vgic-kvm-device.c | 102 ++--
 1 file changed, 75 insertions(+), 27 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93..22d7ab3 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -233,6 +233,69 @@ int kvm_register_vgic_device(unsigned long type)
return ret;
 }
 
+struct vgic_reg_attr {
+   struct kvm_vcpu *vcpu;
+   gpa_t addr;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ struct vgic_reg_attr *reg_attr)
+{
+   int cpuid;
+
+   cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+   if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+   return -EINVAL;
+
+   reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+   reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+   return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+   struct kvm_vcpu *tmp_vcpu;
+
+   for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+   tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+   mutex_unlock(&tmp_vcpu->mutex);
+   }
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+   unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+   struct kvm_vcpu *tmp_vcpu;
+   int c;
+
+   /*
+* Any time a vcpu is run, vcpu_load is called which tries to grab the
+* vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+* that no other VCPUs are run and fiddle with the vgic state while we
+* access it.
+*/
+   kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+   if (!mutex_trylock(&tmp_vcpu->mutex)) {
+   unlock_vcpus(kvm, c - 1);
+   return false;
+   }
+   }
+
+   return true;
+}
+
 /** vgic_attr_regs_access: allows user space to read/write VGIC registers
  *
  * @dev: kvm device handle
@@ -245,15 +308,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 struct kvm_device_attr *attr,
 u32 *reg, bool is_write)
 {
+   struct vgic_reg_attr reg_attr;
gpa_t addr;
-   int cpuid, ret, c;
-   struct kvm_vcpu *vcpu, *tmp_vcpu;
-   int vcpu_lock_idx = -1;
+   struct kvm_vcpu *vcpu;
+   int ret;
 
-   cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-   vcpu = kvm_get_vcpu(dev->kvm, cpuid);
-   addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
   ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
+   if (ret)
+   return ret;
+
+   vcpu = reg_attr.vcpu;
+   addr = reg_attr.addr;
 
mutex_lock(&dev->kvm->lock);
 
@@ -261,24 +326,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
if (ret)
goto out;
 
-   if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
-   ret = -EINVAL;
+   if (!lock_all_vcpus(dev->kvm)) {
+   ret = -EBUSY;
goto out;
}
 
-   /*
-* Any time a vcpu is run, vcpu_load is called which tries to grab the
-* vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-* that no other VCPUs are run and fiddle with the vgic state while we
-* access it.
-*/
-   ret = -EBUSY;
-   kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-   if (!mutex_trylock(&tmp_vcpu->mutex))
-   goto out;
-   vcpu_lock_idx = c;
-   }
-
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
@@ -291,12 +343,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
break;
}
 
+   unlock_all_vcpus(dev->kvm);
 out:
-   for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-   tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
-   mutex_unlock(&tmp_vcpu->mutex);
-   }
-
mutex_unlock(&dev->kvm->lock);
return ret;
 }
-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

[PATCH 0/2] Rework vgic_attr_regs_access

2016-08-16 Thread Christoffer Dall
Two small patches to split up the functionality in vgic_attr_regs_access to
make life simpler when having to deal with GICv3 save/restore.

Christoffer Dall (2):
  KVM: arm/arm64: Factor out vgic_attr_regs_access functionality
  KVM: arm/arm64: Rename vgic_attr_regs_access to
vgic_attr_regs_access_v2

 virt/kvm/arm/vgic/vgic-kvm-device.c | 125 +---
 1 file changed, 87 insertions(+), 38 deletions(-)

-- 
2.9.0

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH v2 2/4] arm/arm64: vgic-new: Add distributor and redistributor access

2016-08-16 Thread Christoffer Dall
On Tue, Aug 09, 2016 at 04:28:44PM +0530, vijay.kil...@gmail.com wrote:
> From: Vijaya Kumar K 
> 
> VGICv3 Distributor and Redistributor registers are accessed using
> KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_DIST_REGS
> with KVM_SET_DEVICE_ATTR and KVM_GET_DEVICE_ATTR ioctls.
> These registers are accessed as 32-bit and cpu mpidr
> value passed along with register offset is used to identify the
> cpu for redistributor registers access.
> 
> The version of VGIC v3 specification is define here
> http://lists.infradead.org/pipermail/linux-arm-kernel/2016-July/445611.html
> 
> Signed-off-by: Vijaya Kumar K 
> ---
>  arch/arm64/include/uapi/asm/kvm.h   |   3 +
>  virt/kvm/arm/vgic/vgic-kvm-device.c |  81 --
>  virt/kvm/arm/vgic/vgic-mmio-v3.c| 113 
> 
>  virt/kvm/arm/vgic/vgic-mmio.c   |   2 +-
>  virt/kvm/arm/vgic/vgic.h|   8 +++
>  5 files changed, 200 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm64/include/uapi/asm/kvm.h 
> b/arch/arm64/include/uapi/asm/kvm.h
> index f209ea1..a6b996e 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -199,10 +199,13 @@ struct kvm_arch_memory_slot {
>  #define KVM_DEV_ARM_VGIC_GRP_CPU_REGS2
>  #define   KVM_DEV_ARM_VGIC_CPUID_SHIFT   32
>  #define   KVM_DEV_ARM_VGIC_CPUID_MASK(0xffULL << 
> KVM_DEV_ARM_VGIC_CPUID_SHIFT)
> +#define   KVM_DEV_ARM_VGIC_V3_CPUID_MASK \
> + (0xffffffffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
>  #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT  0
>  #define   KVM_DEV_ARM_VGIC_OFFSET_MASK   (0xffffffffULL << 
> KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
>  #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
>  #define KVM_DEV_ARM_VGIC_GRP_CTRL4
> +#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
>  #define   KVM_DEV_ARM_VGIC_CTRL_INIT 0
>  
>  /* Device Control API on vcpu fd */
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 06de322..986f8e1 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -243,10 +243,19 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   struct kvm_vcpu *vcpu, *tmp_vcpu;
>   int vcpu_lock_idx = -1;
>   u32 tmp32;
> + struct vgic_dist *vgic = &dev->kvm->arch.vgic;
>  
> - cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> -  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> - vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> + if (vgic->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
> + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
> +  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> + vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> + }
> + else
> + {

coding style issues

> + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_V3_CPUID_MASK) >>
> +  KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> + vcpu = kvm_mpidr_to_vcpu(dev->kvm, cpuid);
> + }

regardless, this function is getting way too bloated.

>   addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
>  
>   mutex_lock(&dev->kvm->lock);
> @@ -283,10 +292,25 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   *reg = tmp32;
>   break;
>   case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &tmp32);
> + if (vgic->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
> + ret = vgic_v2_dist_uaccess(vcpu, is_write, addr,
> +&tmp32);
> + else
> + ret = vgic_v3_dist_uaccess(vcpu, is_write, addr,
> +&tmp32);
>   if (!is_write)
>   *reg = tmp32;
>   break;
> + case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
> + if (vgic->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
> + ret = vgic_v3_redist_uaccess(vcpu, is_write, addr,
> +  &tmp32);
> + if (!is_write)
> + *reg = tmp32;
> + } else {
> + ret = -EINVAL;
> + }
> + break;
>   default:
>   ret = -EINVAL;
>   break;
> @@ -399,13 +423,55 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
>  static int vgic_v3_set_attr(struct kvm_device *dev,
>   struct kvm_device_attr *attr)
>  {
> - return vgic_set_common_attr(dev, attr);
> + int ret;
> +
> + ret = vgic_set_common_attr(dev, attr);
> + if (ret != -ENXIO)
> + return ret;
> +

I think you need to check for (!vgic_initialized()) here, no?

> + switch (attr->group) {
> + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> + case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
> + u32 __user *uaddr = (u32 __user

Re: [RFC PATCH v2 1/4] arm/arm64: vgic-new: Introduce 64-bit reg access support

2016-08-16 Thread Christoffer Dall
On Tue, Aug 09, 2016 at 04:28:43PM +0530, vijay.kil...@gmail.com wrote:
> From: Vijaya Kumar K 
> 
> vgic_attr_regs_access() handles only 32-bit register
> value. Pass u64 as parameter and locally handle 32-bit
> reads and writes depending on attribute group.
> 
> Signed-off-by: Vijaya Kumar K 
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 26 +++---
>  1 file changed, 19 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c 
> b/virt/kvm/arm/vgic/vgic-kvm-device.c
> index 0130c4b..06de322 100644
> --- a/virt/kvm/arm/vgic/vgic-kvm-device.c
> +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
> @@ -236,12 +236,13 @@ void kvm_register_vgic_device(unsigned long type)
>   */
>  static int vgic_attr_regs_access(struct kvm_device *dev,
>struct kvm_device_attr *attr,
> -  u32 *reg, bool is_write)
> +  u64 *reg, bool is_write)
>  {
>   gpa_t addr;
>   int cpuid, ret, c;
>   struct kvm_vcpu *vcpu, *tmp_vcpu;
>   int vcpu_lock_idx = -1;
> + u32 tmp32;
>  
>   cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
>KVM_DEV_ARM_VGIC_CPUID_SHIFT;
> @@ -272,12 +273,19 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
>   vcpu_lock_idx = c;
>   }
>  
> + if (is_write)
> + tmp32 = *reg;
> +

I'm not a fan of this, from seeing that you do the read conversion
inside the case statements I gather you put this here so you only have
to have it once, even though you throw it away if you're doing 64-bit
accesses?

But a greater concern is the vgic_init() call above, which you don't
handle.

I thought we were supposed to get rid of all this lazy vgic init stuff.

Let me send you a patch series of how to rework this vgic_attr function
so that you can reuse some of the functionality and implement a new
gicv3 function on top of that.

Thanks,
-Christoffer

>   switch (attr->group) {
>   case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> - ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
> + ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &tmp32);
> + if (!is_write)
> + *reg = tmp32;
>   break;
>   case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
> - ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
> + ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &tmp32);
> + if (!is_write)
> + *reg = tmp32;


>   break;
>   default:
>   ret = -EINVAL;
> @@ -309,11 +317,13 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
>   case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
>   case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
>   u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> - u32 reg;
> + u32 tmp32;
> + u64 reg;
>  
> - if (get_user(reg, uaddr))
> + if (get_user(tmp32, uaddr))
>   return -EFAULT;
>  
> + reg = tmp32;
> + return vgic_attr_regs_access(dev, attr, &reg, true);
>   }
>   }
> @@ -334,12 +344,14 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
>   case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
>   case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
>   u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> - u32 reg = 0;
> + u32 tmp32;
> + u64 reg;
>  
>   ret = vgic_attr_regs_access(dev, attr, &reg, false);
>   if (ret)
>   return ret;
> - return put_user(reg, uaddr);
> + tmp32 = reg;
> + return put_user(tmp32, uaddr);
>   }
>   }
>  
> -- 
> 1.9.1
> 
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] kvm: arm: Enforce some NS-SVC initialisation

2016-08-16 Thread Robin Murphy
Hi Marc,

On 16/08/16 14:33, Marc Zyngier wrote:
> On 21/07/16 13:01, Robin Murphy wrote:
>> Since the non-secure copies of banked registers lack architecturally
>> defined reset values, there is no actual guarantee when entering in Hyp
>> from secure-only firmware that the non-secure PL1 state will look the
>> way that kernel entry (in particular the decompressor stub) expects.
>> So far, we've been getting away with it thanks to implementation details
>> of ARMv7 cores and/or bootloader behaviour, but for the sake of forwards
>> compatibility let's try to ensure that we have a minimally sane state
>> before dropping down into it.
>>
>> Signed-off-by: Robin Murphy 
>> ---
>>  arch/arm/kernel/hyp-stub.S | 12 
>>  1 file changed, 12 insertions(+)
>>
>> diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
>> index 0b1e4a93d67e..7de3fe15ab21 100644
>> --- a/arch/arm/kernel/hyp-stub.S
>> +++ b/arch/arm/kernel/hyp-stub.S
>> @@ -142,6 +142,18 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
>>  and r7, #0x1f   @ Preserve HPMN
>>  mcr p15, 4, r7, c1, c1, 1   @ HDCR
>>  
>> +@ Make sure NS-SVC is initialised appropriately
>> +mrc p15, 0, r7, c1, c0, 0   @ SCTLR
>> +orr r7, #(1 << 5)   @ CP15 barriers enabled
>> +bic r7, #(3 << 19)  @ WXN and UWXN disabled
> 
> I think that while you're doing this, you also may want to clear SED and
> ITD so that a BE kernel has a chance to survive its first instruction
> (assuming it it uses the decompressor...).

Good point; I wrote this from the v7 perspective and neglected those,
and I think I was actually trying to achieve something useful at the
time which precluded cracking out the big-endian Thumb-2 kernel ;)

>From a quick correlation between ARM ARMs, those bits should be reliably
safe to unconditionally clear on v7VE, so let's do it. I'll respin shortly.

>> +mcr p15, 0, r7, c1, c0, 0   @ SCTLR
>> +
>> +mrc p15, 0, r7, c0, c0, 0   @ MIDR
>> +mcr p15, 4, r7, c0, c0, 0   @ VPIDR
>> +
>> +mrc p15, 0, r7, c0, c0, 5   @ MPIDR
>> +mcr p15, 4, r7, c0, c0, 5   @ VMPIDR
>> +
>>  #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
>>  @ make CNTP_* and CNTPCT accessible from PL1
>>  mrc p15, 0, r7, c0, c1, 1   @ ID_PFR1
>>
> 
> Otherwise looks good.

Cheers,
Robin.

> 
> Thanks,
> 
>   M.
> 

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH] KVM: arm/arm64: timer: Workaround misconfigured timer interrupt

2016-08-16 Thread Marc Zyngier
Similarily to f005bd7e3b84 ("clocksource/arm_arch_timer: Force
per-CPU interrupt to be level-triggered"), make sure we can
survive an interrupt that has been misconfigured as edge-triggered
by forcing it to be level-triggered (active low is assumed, but
the GIC doesn't really care whether this is high or low).

Hopefully, the amount of shouting in the kernel log will convince
the user to do something about their firmware.

Signed-off-by: Marc Zyngier 
---
 virt/kvm/arm/arch_timer.c | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4fde8c7..77e6ccf 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -33,6 +33,7 @@
 static struct timecounter *timecounter;
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_init_interrupt(void *info)
 {
-   enable_percpu_irq(host_vtimer_irq, 0);
+   enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
}
host_vtimer_irq = info->virtual_irq;
 
+   host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+   if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+   host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+   kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+   host_vtimer_irq);
+   host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+   }
+
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
 "kvm guest timer", kvm_get_running_vcpus());
if (err) {
-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH] arm64: Document workaround for Cortex-A72 erratum #853709

2016-08-16 Thread Marc Zyngier
We already have a workaround for Cortex-A57 erratum #852523,
but Cortex-A72 r0p0 to r0p2 do suffer from the same issue
(known as erratum #853709).

Let's document the fact that we already handle this.

Acked-by: Will Deacon 
Signed-off-by: Marc Zyngier 
---
 Documentation/arm64/silicon-errata.txt | 1 +
 arch/arm64/kvm/hyp/switch.c| 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/Documentation/arm64/silicon-errata.txt 
b/Documentation/arm64/silicon-errata.txt
index 4da60b4..ccc6032 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -53,6 +53,7 @@ stable kernels.
 | ARM| Cortex-A57  | #832075 | ARM64_ERRATUM_832075
|
 | ARM| Cortex-A57  | #852523 | N/A 
|
 | ARM| Cortex-A57  | #834220 | ARM64_ERRATUM_834220
|
+| ARM| Cortex-A72  | #853709 | N/A 
|
 | ARM| MMU-500 | #841119,#826419 | N/A 
|
 || | | 
|
 | Cavium | ThunderX ITS| #22375, #24313  | CAVIUM_ERRATUM_22375
|
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f..5a84b45 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
/*
 * We must restore the 32-bit state before the sysregs, thanks
-* to Cortex-A57 erratum #852523.
+* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 */
__sysreg32_restore_state(vcpu);
__sysreg_restore_guest_state(guest_ctxt);
-- 
2.1.4

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH] kvm: arm: Enforce some NS-SVC initialisation

2016-08-16 Thread Marc Zyngier
On 21/07/16 13:01, Robin Murphy wrote:
> Since the non-secure copies of banked registers lack architecturally
> defined reset values, there is no actual guarantee when entering in Hyp
> from secure-only firmware that the non-secure PL1 state will look the
> way that kernel entry (in particular the decompressor stub) expects.
> So far, we've been getting away with it thanks to implementation details
> of ARMv7 cores and/or bootloader behaviour, but for the sake of forwards
> compatibility let's try to ensure that we have a minimally sane state
> before dropping down into it.
> 
> Signed-off-by: Robin Murphy 
> ---
>  arch/arm/kernel/hyp-stub.S | 12 
>  1 file changed, 12 insertions(+)
> 
> diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
> index 0b1e4a93d67e..7de3fe15ab21 100644
> --- a/arch/arm/kernel/hyp-stub.S
> +++ b/arch/arm/kernel/hyp-stub.S
> @@ -142,6 +142,18 @@ ARM_BE8(orr  r7, r7, #(1 << 25)) @ HSCTLR.EE
>   and r7, #0x1f   @ Preserve HPMN
>   mcr p15, 4, r7, c1, c1, 1   @ HDCR
>  
> + @ Make sure NS-SVC is initialised appropriately
> + mrc p15, 0, r7, c1, c0, 0   @ SCTLR
> + orr r7, #(1 << 5)   @ CP15 barriers enabled
> + bic r7, #(3 << 19)  @ WXN and UWXN disabled

I think that while you're doing this, you also may want to clear SED and
ITD so that a BE kernel has a chance to survive its first instruction
(assuming it it uses the decompressor...).

> + mcr p15, 0, r7, c1, c0, 0   @ SCTLR
> +
> + mrc p15, 0, r7, c0, c0, 0   @ MIDR
> + mcr p15, 4, r7, c0, c0, 0   @ VPIDR
> +
> + mrc p15, 0, r7, c0, c0, 5   @ MPIDR
> + mcr p15, 4, r7, c0, c0, 5   @ VMPIDR
> +
>  #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
>   @ make CNTP_* and CNTPCT accessible from PL1
>   mrc p15, 0, r7, c0, c1, 1   @ ID_PFR1
> 

Otherwise looks good.

Thanks,

M.
-- 
Jazz is not dead. It just smells funny...
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 2/7] KVM: Track the pid of the VM process

2016-08-16 Thread Punit Agrawal
Userspace tools such as perf can be used to profile individual
processes.

Track the PID of the virtual machine process to match profiling requests
targeted at it. This can be used to take appropriate action to enable
the requested profiling operations for the VMs of interest.

Signed-off-by: Punit Agrawal 
Cc: Paolo Bonzini 
Cc: "Radim Krčmář" 
Cc: Christoffer Dall 
Cc: Marc Zyngier 
---
 include/linux/kvm_host.h | 1 +
 virt/kvm/kvm_main.c  | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9c28b4d..7c42c94 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -374,6 +374,7 @@ struct kvm_memslots {
 struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
+   struct pid *pid;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1950782..ab2535a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -613,6 +613,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
spin_lock_init(&kvm->mmu_lock);
	atomic_inc(&current->mm->mm_count);
kvm->mm = current->mm;
+   kvm->pid = get_task_pid(current, PIDTYPE_PID);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
@@ -712,6 +713,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
int i;
struct mm_struct *mm = kvm->mm;
 
+   put_pid(kvm->pid);
kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
spin_lock(&kvm_lock);
-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 6/7] arm64: KVM: Handle trappable TLB instructions

2016-08-16 Thread Punit Agrawal
The ARMv8 architecture allows trapping of TLB maintenance instructions
from EL0/EL1 to higher exception levels. On encountering a trappable TLB
instruction in a guest, an exception is taken to EL2.

Add functionality to handle emulating the TLB instructions.

Signed-off-by: Punit Agrawal 
Cc: Christoffer Dall 
Cc: Marc Zyngier 
---
 arch/arm64/include/asm/kvm_asm.h |   1 +
 arch/arm64/kvm/hyp/tlb.c | 146 +++
 arch/arm64/kvm/sys_regs.c|  81 ++
 arch/arm64/kvm/trace.h   |  16 +
 4 files changed, 244 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7561f63..1ac1cc3 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -49,6 +49,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_emulate_tlb_invalidate(struct kvm *kvm, u32 sysreg, u64 
regval);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 4cda100..e0a0309 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -78,3 +78,149 @@ static void __hyp_text __tlb_flush_vm_context(void)
 }
 
 __alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
+
+/* Intentionally empty functions */
+static void __hyp_text __switch_to_hyp_role_nvhe(void) { }
+static void __hyp_text __switch_to_host_role_nvhe(void) { }
+
+static void __hyp_text __switch_to_hyp_role_vhe(void)
+{
+   u64 hcr = read_sysreg(hcr_el2);
+
+   hcr &= ~HCR_TGE;
+   write_sysreg(hcr, hcr_el2);
+}
+
+static void __hyp_text __switch_to_host_role_vhe(void)
+{
+   u64 hcr = read_sysreg(hcr_el2);
+
+   hcr |= HCR_TGE;
+   write_sysreg(hcr, hcr_el2);
+}
+
+static hyp_alternate_select(__switch_to_hyp_role,
+   __switch_to_hyp_role_nvhe,
+   __switch_to_hyp_role_vhe,
+   ARM64_HAS_VIRT_HOST_EXTN);
+
+static hyp_alternate_select(__switch_to_host_role,
+   __switch_to_host_role_nvhe,
+   __switch_to_host_role_vhe,
+   ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __switch_to_guest_regime(struct kvm *kvm)
+{
+   write_sysreg(kvm->arch.vttbr, vttbr_el2);
+   __switch_to_hyp_role();
+   isb();
+}
+
+static void __hyp_text __switch_to_host_regime(void)
+{
+   __switch_to_host_role();
+   write_sysreg(0, vttbr_el2);
+}
+
+/*
+ *  AArch32 TLB maintenance instructions trapping to EL2
+ */
+#define TLBIALLIS  sys_reg(0, 0, 8, 3, 0)
+#define TLBIMVAIS  sys_reg(0, 0, 8, 3, 1)
+#define TLBIASIDIS sys_reg(0, 0, 8, 3, 2)
+#define TLBIMVAAIS sys_reg(0, 0, 8, 3, 3)
+#define TLBIMVALIS sys_reg(0, 0, 8, 3, 5)
+#define TLBIMVAALISsys_reg(0, 0, 8, 3, 7)
+#define ITLBIALL   sys_reg(0, 0, 8, 5, 0)
+#define ITLBIMVA   sys_reg(0, 0, 8, 5, 1)
+#define ITLBIASID  sys_reg(0, 0, 8, 5, 2)
+#define DTLBIALL   sys_reg(0, 0, 8, 6, 0)
+#define DTLBIMVA   sys_reg(0, 0, 8, 6, 1)
+#define DTLBIASID  sys_reg(0, 0, 8, 6, 2)
+#define TLBIALLsys_reg(0, 0, 8, 7, 0)
+#define TLBIMVAsys_reg(0, 0, 8, 7, 1)
+#define TLBIASID   sys_reg(0, 0, 8, 7, 2)
+#define TLBIMVAA   sys_reg(0, 0, 8, 7, 3)
+#define TLBIMVAL   sys_reg(0, 0, 8, 7, 5)
+#define TLBIMVAAL  sys_reg(0, 0, 8, 7, 7)
+
+/*
+ * ARMv8 ARM: Table C5-4 TLB maintenance instructions
+ * (Ref: ARMv8 ARM C5.1 version: ARM DDI 0487A.j)
+ */
+#define TLBI_VMALLE1IS sys_reg(1, 0, 8, 3, 0)
+#define TLBI_VAE1ISsys_reg(1, 0, 8, 3, 1)
+#define TLBI_ASIDE1IS  sys_reg(1, 0, 8, 3, 2)
+#define TLBI_VAAE1IS   sys_reg(1, 0, 8, 3, 3)
+#define TLBI_VALE1IS   sys_reg(1, 0, 8, 3, 5)
+#define TLBI_VAALE1IS  sys_reg(1, 0, 8, 3, 7)
+#define TLBI_VMALLE1   sys_reg(1, 0, 8, 7, 0)
+#define TLBI_VAE1  sys_reg(1, 0, 8, 7, 1)
+#define TLBI_ASIDE1sys_reg(1, 0, 8, 7, 2)
+#define TLBI_VAAE1 sys_reg(1, 0, 8, 7, 3)
+#define TLBI_VALE1 sys_reg(1, 0, 8, 7, 5)
+#define TLBI_VAALE1sys_reg(1, 0, 8, 7, 7)
+
+void __hyp_text
+__kvm_emulate_tlb_invalidate(struct kvm *kvm, u32 sys_op, u64 regval)
+{
+   kvm = kern_hyp_va(kvm);
+
+   /*
+* Switch to the guest before performing any TLB operations to
+* target

[RFC PATCH 5/7] arm64/kvm: hyp: tlb: use __tlbi() helper

2016-08-16 Thread Punit Agrawal
From: Mark Rutland 

Now that we have a __tlbi() helper, make use of this in the arm64 KVM hyp
code to get rid of asm() boilerplate. At the same time, we simplify
__tlb_flush_vm_context by using __flush_icache_all(), as this has the
appropriate instruction cache maintenance and barrier.

Signed-off-by: Mark Rutland 
Cc: Marc Zyngier 
[ rename tlbi -> __tlbi, convert additional sites, update commit log ]
Signed-off-by: Punit Agrawal 
---
 arch/arm64/kvm/hyp/tlb.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index be8177c..4cda100 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -16,6 +16,7 @@
  */
 
 #include 
+#include 
 
 static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -32,7 +33,7 @@ static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, 
phys_addr_t ipa)
 * whole of Stage-1. Weep...
 */
ipa >>= 12;
-   asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
+   __tlbi(ipas2e1is, ipa);
 
/*
 * We have to ensure completion of the invalidation at Stage-2,
@@ -41,7 +42,7 @@ static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, 
phys_addr_t ipa)
 * the Stage-1 invalidation happened first.
 */
dsb(ish);
-   asm volatile("tlbi vmalle1is" : : );
+   __tlbi(vmalle1is);
dsb(ish);
isb();
 
@@ -60,7 +61,7 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
 
-   asm volatile("tlbi vmalls12e1is" : : );
+   __tlbi(vmalls12e1is);
dsb(ish);
isb();
 
@@ -72,9 +73,8 @@ __alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct 
kvm *kvm);
 static void __hyp_text __tlb_flush_vm_context(void)
 {
dsb(ishst);
-   asm volatile("tlbi alle1is  \n"
-"ic ialluis  ": : );
-   dsb(ish);
+   __tlbi(alle1is);
+   __flush_icache_all(); /* contains a dsb(ish) */
 }
 
 __alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 4/7] arm64: tlbflush.h: add __tlbi() macro

2016-08-16 Thread Punit Agrawal
From: Mark Rutland 

As with dsb() and isb(), add a __tlbi() helper so that we can avoid
distracting asm boilerplate every time we want a TLBI. As some TLBI
operations take an argument while others do not, some pre-processor is
used to handle these two cases with different assembly blocks.

The existing tlbflush.h code is moved over to use the helper.

Signed-off-by: Mark Rutland 
Cc: Catalin Marinas 
Cc: Marc Zyngier 
Cc: Will Deacon 
[ rename helper to __tlbi, update commit log ]
Signed-off-by: Punit Agrawal 
---
 arch/arm64/include/asm/tlbflush.h | 31 +++
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h 
b/arch/arm64/include/asm/tlbflush.h
index b460ae2..d57a0be 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -25,6 +25,21 @@
 #include 
 
 /*
+ * Raw TLBI operations. Drivers and most kernel code should use the TLB
+ * management routines below in preference to these. Where necessary, these can
+ * be used to avoid asm() boilerplate.
+ *
+ * Can be used as __tlbi(op) or __tlbi(op, arg), depending on whether a
+ * particular TLBI op takes an argument or not. The macros below handle 
invoking
+ * the asm with or without the register argument as appropriate.
+ */
+#define TLBI_0(op, arg)asm ("tlbi " #op)
+#define TLBI_1(op, arg)asm ("tlbi " #op ", %0" : : "r" (arg))
+#define TLBI_N(op, arg, n, ...)TLBI_##n(op, arg)
+
+#define __tlbi(op, ...)TLBI_N(op, ##__VA_ARGS__, 1, 0)
+
+/*
  * TLB Management
  * ==
  *
@@ -66,7 +81,7 @@
 static inline void local_flush_tlb_all(void)
 {
dsb(nshst);
-   asm("tlbi   vmalle1");
+   __tlbi(vmalle1);
dsb(nsh);
isb();
 }
@@ -74,7 +89,7 @@ static inline void local_flush_tlb_all(void)
 static inline void flush_tlb_all(void)
 {
dsb(ishst);
-   asm("tlbi   vmalle1is");
+   __tlbi(vmalle1is);
dsb(ish);
isb();
 }
@@ -84,7 +99,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
unsigned long asid = ASID(mm) << 48;
 
dsb(ishst);
-   asm("tlbi   aside1is, %0" : : "r" (asid));
+   __tlbi(aside1is, asid);
dsb(ish);
 }
 
@@ -94,7 +109,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
dsb(ishst);
-   asm("tlbi   vale1is, %0" : : "r" (addr));
+   __tlbi(vale1is, addr);
dsb(ish);
 }
 
@@ -122,9 +137,9 @@ static inline void __flush_tlb_range(struct vm_area_struct 
*vma,
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
if (last_level)
-   asm("tlbi vale1is, %0" : : "r"(addr));
+   __tlbi(vale1is, addr);
else
-   asm("tlbi vae1is, %0" : : "r"(addr));
+   __tlbi(vae1is, addr);
}
dsb(ish);
 }
@@ -149,7 +164,7 @@ static inline void flush_tlb_kernel_range(unsigned long 
start, unsigned long end
 
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-   asm("tlbi vaae1is, %0" : : "r"(addr));
+   __tlbi(vaae1is, addr);
dsb(ish);
isb();
 }
@@ -163,7 +178,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 {
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
-   asm("tlbi   vae1is, %0" : : "r" (addr));
+   __tlbi(vae1is, addr);
dsb(ish);
 }
 
-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 3/7] KVM: arm/arm64: Register perf trace event notifier

2016-08-16 Thread Punit Agrawal
Register a notifier to track state changes of perf trace events.

The notifier will enable taking appropriate action for trace events
targeting VM.

Signed-off-by: Punit Agrawal 
Cc: Christoffer Dall 
Cc: Marc Zyngier 
---
 arch/arm/include/asm/kvm_host.h   |   3 +
 arch/arm/kvm/arm.c|   2 +
 arch/arm64/include/asm/kvm_host.h |   8 +++
 arch/arm64/kvm/Kconfig|   4 ++
 arch/arm64/kvm/Makefile   |   1 +
 arch/arm64/kvm/perf_trace.c   | 122 ++
 6 files changed, 140 insertions(+)
 create mode 100644 arch/arm64/kvm/perf_trace.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index de338d9..609998e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -280,6 +280,9 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct 
kvm *kvm, long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline int kvm_perf_trace_init(void) { return 0; }
+static inline int kvm_perf_trace_teardown(void) { return 0; }
+
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 75f130e..e1b99c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1220,6 +1220,7 @@ static int init_subsystems(void)
goto out;
 
kvm_perf_init();
+   kvm_perf_trace_init();
kvm_coproc_table_init();
 
 out:
@@ -1411,6 +1412,7 @@ out_err:
 void kvm_arch_exit(void)
 {
kvm_perf_teardown();
+   kvm_perf_trace_teardown();
 }
 
 static int arm_init(void)
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 3eda975..f6ff8e5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -345,6 +345,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+#if !defined(CONFIG_KVM_PERF_TRACE)
+static inline int kvm_perf_trace_init(void) { return 0; }
+static inline int kvm_perf_trace_teardown(void) { return 0; }
+#else
+int kvm_perf_trace_init(void);
+int kvm_perf_trace_teardown(void);
+#endif
+
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 9c9edc9..56e9537 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -19,6 +19,9 @@ if VIRTUALIZATION
 config KVM_ARM_VGIC_V3
bool
 
+config KVM_PERF_TRACE
+bool
+
 config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
@@ -39,6 +42,7 @@ config KVM
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
+   select KVM_PERF_TRACE if EVENT_TRACING && PERF_EVENTS
---help---
  Support hosting virtualized guest machines.
  We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 695eb3c..7d175e4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -19,6 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
 kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o 
sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_PERF_TRACE) += perf_trace.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
diff --git a/arch/arm64/kvm/perf_trace.c b/arch/arm64/kvm/perf_trace.c
new file mode 100644
index 000..8bacd18
--- /dev/null
+++ b/arch/arm64/kvm/perf_trace.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ * Author: Punit Agrawal 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+#include 
+#include 
+
+typedef int (*perf_trace_callback_fn)(struct kvm *kvm, bool enable);
+
+struct kvm_trace_hook {
+   char *key;
+   perf_trace_callback_fn setup_fn;
+};
+
+static struct kvm_trace_hook trace_hook[] = {
+   { },
+};
+
+static perf_trace_callback_fn find_trace_callback(const char *trace_key)
+{
+   int i;
+
+   for (i = 0; trace_hook[i].key; i++)
+   if (!strcmp(trace_key, trace_hook[i].key))
+   

[RFC PATCH 7/7] arm64: KVM: Enable selective trapping of TLB instructions

2016-08-16 Thread Punit Agrawal
The TTLB bit of Hypervisor Control Register (HCR_EL2) controls the
trapping of guest TLB maintenance instructions. Taking the trap requires
a switch to the hypervisor and is an expensive operation.

Enable selective trapping of guest TLB instructions when the associated
perf trace event is enabled for a specific virtual machine.

Signed-off-by: Punit Agrawal 
Cc: Christoffer Dall 
Cc: Marc Zyngier 
---
 arch/arm64/kvm/perf_trace.c | 32 
 1 file changed, 32 insertions(+)

diff --git a/arch/arm64/kvm/perf_trace.c b/arch/arm64/kvm/perf_trace.c
index 8bacd18..f26da1d 100644
--- a/arch/arm64/kvm/perf_trace.c
+++ b/arch/arm64/kvm/perf_trace.c
@@ -17,6 +17,8 @@
 #include 
 #include 
 
+#include 
+
 typedef int (*perf_trace_callback_fn)(struct kvm *kvm, bool enable);
 
 struct kvm_trace_hook {
@@ -24,7 +26,37 @@ struct kvm_trace_hook {
perf_trace_callback_fn setup_fn;
 };
 
+static int tlb_invalidate_trap(struct kvm *kvm, bool enable)
+{
+   int i;
+   struct kvm_vcpu *vcpu;
+
+   /*
+* Halt the VM to ensure atomic update across all vcpus (this
+* avoids racy behaviour against other modifications of
+* HCR_EL2 such as kvm_toggle_cache/kvm_set_way_flush).
+*/
+   kvm_arm_halt_guest(kvm);
+   kvm_for_each_vcpu(i, vcpu, kvm) {
+   unsigned long hcr = vcpu_get_hcr(vcpu);
+
+   if (enable)
+   hcr |= HCR_TTLB;
+   else
+   hcr &= ~HCR_TTLB;
+
+   vcpu_set_hcr(vcpu, hcr);
+   }
+   kvm_arm_resume_guest(kvm);
+
+   return 0;
+}
+
 static struct kvm_trace_hook trace_hook[] = {
+   {
+   .key = "kvm_tlb_invalidate",
+   .setup_fn = tlb_invalidate_trap,
+   },
{ },
 };
 
-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 0/7] Add support for monitoring guest TLB operations

2016-08-16 Thread Punit Agrawal
Hi,

ARMv8 supports trapping guest TLB maintenance operations to the
hypervisor. This trapping mechanism can be used to monitor the use of
guest TLB instructions.

As taking a trap for every TLB operation can have significant
overhead, trapping should only be enabled -

* on user request
* for the VM of interest

This patchset adds support to listen to perf trace event state change
notifications. The notifications and associated context are then used
to enable trapping of guest TLB operations when requested by the
user. The trap handling generates trace events (kvm_tlb_invalidate)
which can already be counted using existing perf trace
functionality.

Trapping of guest TLB operations is disabled when not being monitored
(reducing profiling overhead).

I would appreciate feedback on the approach to tie the control of TLB
monitoring with perf trace events (Patch 1) especially if there are
any suggestions on avoiding (or reducing) the overhead of "perf trace"
notifications.

I looked at using regfunc/unregfunc tracepoint hooks but they don't
include the event context. But the bigger problem was that the
callbacks are only called on the first instance of simultaneously
executing perf stat invocations.

The patchset is based on v4.8-rc2 and adds support for monitoring
guest TLB operations on 64bit hosts. If the approach taken in the
patches is acceptable, I'll add 32bit host support as well.

With this patchset, 'perf' tool when attached to a VM process can be
used to monitor the TLB operations. E.g., to monitor a VM with process
id 4166 -

# perf stat -e "kvm:kvm_tlb_invalidate" -p 4166

Perform some operations in VM (running 'make -j 7' on the kernel
sources in this instance). Breaking out of perf shows -

Performance counter stats for process id '4166':

 7,471,974  kvm:kvm_tlb_invalidate

 374.235405282 seconds time elapsed

All feedback welcome.

Thanks,
Punit

Mark Rutland (2):
  arm64: tlbflush.h: add __tlbi() macro
  arm64/kvm: hyp: tlb: use __tlbi() helper

Punit Agrawal (5):
  perf/trace: Add notification for perf trace events
  KVM: Track the pid of the VM process
  KVM: arm/arm64: Register perf trace event notifier
  arm64: KVM: Handle trappable TLB instructions
  arm64: KVM: Enable selective trapping of TLB instructions

 arch/arm/include/asm/kvm_host.h   |   3 +
 arch/arm/kvm/arm.c|   2 +
 arch/arm64/include/asm/kvm_asm.h  |   1 +
 arch/arm64/include/asm/kvm_host.h |   8 ++
 arch/arm64/include/asm/tlbflush.h |  31 ++--
 arch/arm64/kvm/Kconfig|   4 +
 arch/arm64/kvm/Makefile   |   1 +
 arch/arm64/kvm/hyp/tlb.c  | 158 --
 arch/arm64/kvm/perf_trace.c   | 154 +
 arch/arm64/kvm/sys_regs.c |  81 +++
 arch/arm64/kvm/trace.h|  16 
 include/linux/kvm_host.h  |   1 +
 include/linux/trace_events.h  |   3 +
 kernel/trace/trace_event_perf.c   |  24 ++
 virt/kvm/kvm_main.c   |   2 +
 15 files changed, 475 insertions(+), 14 deletions(-)
 create mode 100644 arch/arm64/kvm/perf_trace.c

-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[RFC PATCH 1/7] perf/trace: Add notification for perf trace events

2016-08-16 Thread Punit Agrawal
Add a mechanism to notify listeners about perf trace event state
changes. This enables listeners to take actions requiring the event
context (e.g., attached process).

The notification mechanism can be used to reduce trace point based
profiling overhead by enabling/disabling hardware traps for specific
contexts (e.g., virtual machines).

Signed-off-by: Punit Agrawal 
Cc: Steven Rostedt 
Cc: Ingo Molnar 
---
 include/linux/trace_events.h|  3 +++
 kernel/trace/trace_event_perf.c | 24 
 2 files changed, 27 insertions(+)

diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be00761..5924032 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -505,6 +505,9 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, 
u16 type,
 {
perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
+
+extern int perf_trace_notifier_register(struct notifier_block *nb);
+extern int perf_trace_notifier_unregister(struct notifier_block *nb);
 #endif
 
 #endif /* _LINUX_TRACE_EVENT_H */
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 562fa69..9cf 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -6,10 +6,12 @@
  */
 
 #include 
+#include 
 #include 
 #include "trace.h"
 
 static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
+static RAW_NOTIFIER_HEAD(perf_trace_notifier_list);
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -86,6 +88,26 @@ static int perf_trace_event_perm(struct trace_event_call 
*tp_event,
return 0;
 }
 
+int perf_trace_notifier_register(struct notifier_block *nb)
+{
+   return raw_notifier_chain_register(&perf_trace_notifier_list, nb);
+}
+
+int perf_trace_notifier_unregister(struct notifier_block *nb)
+{
+   return raw_notifier_chain_unregister(&perf_trace_notifier_list, nb);
+}
+
+static void perf_trace_notify(enum trace_reg event, struct perf_event *p_event)
+{
+   /*
+* We use raw notifiers here as we are called with the
+* event_mutex held.
+*/
+   raw_notifier_call_chain(&perf_trace_notifier_list,
+event, p_event);
+}
+
 static int perf_trace_event_reg(struct trace_event_call *tp_event,
struct perf_event *p_event)
 {
@@ -176,6 +198,7 @@ out:
 static int perf_trace_event_open(struct perf_event *p_event)
 {
struct trace_event_call *tp_event = p_event->tp_event;
+   perf_trace_notify(TRACE_REG_PERF_OPEN, p_event);
return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
 }
 
@@ -183,6 +206,7 @@ static void perf_trace_event_close(struct perf_event 
*p_event)
 {
struct trace_event_call *tp_event = p_event->tp_event;
tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
+   perf_trace_notify(TRACE_REG_PERF_CLOSE, p_event);
 }
 
 static int perf_trace_event_init(struct trace_event_call *tp_event,
-- 
2.8.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 6/7] ARM: KVM: Get ready to use vgic-v3

2016-08-16 Thread Vladimir Murzin
We need to take care we have everything vgic-v3 expects from us before
a quantum leap:
- provide required macros via uapi.h
- handle access to GICv3 cpu interface from the guest
- provide mapping between arm64 version of GICv3 cpu registers and arm's

The latter is handled via redirection of read{write}_gicreg() and
is required mainly because the 64-bit wide ICH_LR is split in two 32-bit
halves (ICH_LR and ICH_LRC) accessed independently.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/arch_gicv3.h |   64 +
 arch/arm/include/asm/kvm_asm.h|3 ++
 arch/arm/include/uapi/asm/kvm.h   |7 
 arch/arm/kvm/coproc.c |   36 +
 4 files changed, 110 insertions(+)

diff --git a/arch/arm/include/asm/arch_gicv3.h 
b/arch/arm/include/asm/arch_gicv3.h
index af25c32..f93f6bd 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -96,6 +96,70 @@
 #define ICH_AP1R2  __AP1Rx(2)
 #define ICH_AP1R3  __AP1Rx(3)
 
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64)\
+static inline void write_ ## a64(u32 val)  \
+{  \
+   write_sysreg(val, a32); \
+}  \
+static inline u32 read_ ## a64(void)   \
+{  \
+   return read_sysreg(a32);\
+}  \
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
+static inline void write_ ## a64(u64 val)  \
+{  \
+   write_sysreg((u32)val, a32lo);  \
+   write_sysreg((u32)(val >> 32), a32hi);  \
+}  \
+static inline u64 read_ ## a64(void)   \
+{  \
+   u64 val = read_sysreg(a32lo);   \
+   \
+   val |=  (u64)read_sysreg(a32hi) << 32;  \
+   \
+   return val; \
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r) read_##r()
+#define write_gicreg(v, r) write_##r(v)
+
 /* Low-level accessors */
 
 static inline void gic_write_eoir(u32 irq)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5..dfccf94 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -68,6 +68,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __init_stage2_translation(void);
 
 extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a2b3eb3..b38c10c 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
 #define KVM_VGIC_V2_DIST_SIZE  0x1000
 #define KVM_VGIC_V2_CPU_SIZE   0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
+
+#define KVM_VGIC_V3_DIST_SIZE  SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE(2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2  1 /* CPU uses PSCI v0.2 */
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/cop

[PATCH v2 7/7] ARM: KVM: Unlock vgic-v3 support

2016-08-16 Thread Vladimir Murzin
It is time to get access to common version of vgic-v3.

We basically would need to tell build system how to pick it up and
undo KVM_ARM_VGIC_V3 guarding introduced in 4f64cb6 ("arm/arm64: KVM:
Only allow 64bit hosts to build VGICv3") and remove stubs. However,
since vgic got ITS support, KVM_ARM_VGIC_V3 tends to protect a little
bit more than just plain vgic-v3 - this guard is used for ITS too,
which is not supported in the 32-bit world yet.  So, along with removal of
KVM_ARM_VGIC_V3 guard introduce the new one - KVM_ARM_VGIC_V3_ITS to
protect ITS related code.

The only unpleasant part is how we decide which save/restore sequence to
use under __vgic_save_state() - we don't have patching framework in hand
like arm64, so have to check runtime on every invocation.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/kvm_host.h |4 +++
 arch/arm/include/asm/kvm_hyp.h  |5 
 arch/arm/kvm/Makefile   |2 ++
 arch/arm/kvm/hyp/Makefile   |1 +
 arch/arm/kvm/hyp/switch.c   |   20 +++--
 arch/arm64/kvm/Kconfig  |4 +--
 include/kvm/arm_vgic.h  |8 --
 virt/kvm/arm/vgic/vgic-kvm-device.c |   10 ++-
 virt/kvm/arm/vgic/vgic-mmio-v3.c|2 ++
 virt/kvm/arm/vgic/vgic-mmio.c   |2 --
 virt/kvm/arm/vgic/vgic-mmio.h   |3 --
 virt/kvm/arm/vgic/vgic.h|   54 ++-
 12 files changed, 38 insertions(+), 77 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index de338d9..1312597 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,11 @@
 
 #include 
 
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
 
 #define KVM_REQ_VCPU_EXIT  8
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index e604ad68..95669b3 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -88,6 +88,8 @@
 
 #define VFP_FPEXC  __ACCESS_VFP(FPEXC)
 
+#define ID_PFR1__ACCESS_CP15(c0, 0, c1, 1)
+
 /* AArch64 compatibility macros, only for the timer so far */
 #define read_sysreg_el0(r) read_sysreg(r##_el0)
 #define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
@@ -103,6 +105,9 @@ void __timer_restore_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 10d77a6..043d817f 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -26,8 +26,10 @@ obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
 obj-y += $(KVM)/irqchip.o
 obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8dfa5f7..3023bb5 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -5,6 +5,7 @@
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index b13caa9..9666bae 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -15,6 +15,8 @@
  * along with this program.  If not, see .
  */
 
+#include 
+
 #include 
 #include 
 
@@ -74,14 +76,28 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu 
*vcpu)
write_sysreg(read_sysreg(MIDR), VPIDR);
 }
 
+static bool __hyp_text __has_useable_gicv3_cpuif(void)
+{
+   if (IS_ENABLED(CONFIG_ARM_GIC_V3) && (read_sysreg(ID_PFR1) >> 28))
+   return !!(read_sysreg(ICC_HSRE) & ICC_SRE_EL2_SRE);
+   else
+   return false;
+}
+
 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-   __vgic_v2_save_state(vcpu);
+   if (__has_useable_gicv3_cpuif())
+   __vgic_v3_save_state(vcpu);
+   else
+   __vgic_v2_save_state(vcpu);
 }
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-   __vgic_v2_restore_state(vcpu);
+   if (__has_useable_gicv3_cpuif())
+   __vgic_v3_restore_state(vcpu);
+   else
+   __vgic_v2_restore_state(vcpu);
 }
 
 static bool __hyp_text __populate_fault_info(struct k

[PATCH v2 4/7] ARM: update MPIDR accessors macro

2016-08-16 Thread Vladimir Murzin
The vgic-v3 driver queries CPU affinity levels up to Aff3, which is valid for
arm64. However, arm only supports affinity levels up to Aff2, so querying the
3rd level ends up with the upper bits of MPIDR being treated as a valid
affinity level, which is not true. So, report zero for any affinity level above 2.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/cputype.h |3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c7..96cef49 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,9 +55,10 @@
 
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
 
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
-   ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+   ((level < 3) ? ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) 
: 0)
 
 #define ARM_CPU_IMP_ARM0x41
 #define ARM_CPU_IMP_INTEL  0x69
-- 
1.7.9.5

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 5/7] ARM: move system register accessors to asm/cp15.h

2016-08-16 Thread Vladimir Murzin
The macro __ACCESS_CP15{_64} is defined in two headers (arch_gicv3.h and
kvm_hyp.h), both of which are going to be included by vgic-v3.
GCC would complain because it would see the macro redefined with a
different definition. So, let's keep a single version of the macro in a
common place and use it everywhere.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/arch_gicv3.h |   27 +++
 arch/arm/include/asm/cp15.h   |   15 +++
 arch/arm/include/asm/kvm_hyp.h|   15 +--
 3 files changed, 27 insertions(+), 30 deletions(-)

diff --git a/arch/arm/include/asm/arch_gicv3.h 
b/arch/arm/include/asm/arch_gicv3.h
index e08d151..af25c32 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
 
 #include 
 #include 
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)  p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
+#include 
 
 #define ICC_EOIR1  __ACCESS_CP15(c12, 0, c12, 1)
 #define ICC_DIR__ACCESS_CP15(c12, 0, c11, 1)
@@ -102,58 +100,55 @@
 
 static inline void gic_write_eoir(u32 irq)
 {
-   asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+   write_sysreg(irq, ICC_EOIR1);
isb();
 }
 
 static inline void gic_write_dir(u32 val)
 {
-   asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+   write_sysreg(val, ICC_DIR);
isb();
 }
 
 static inline u32 gic_read_iar(void)
 {
-   u32 irqstat;
+   u32 irqstat = read_sysreg(ICC_IAR1);
 
-   asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
dsb(sy);
+
return irqstat;
 }
 
 static inline void gic_write_pmr(u32 val)
 {
-   asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+   write_sysreg(val, ICC_PMR);
 }
 
 static inline void gic_write_ctlr(u32 val)
 {
-   asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+   write_sysreg(val, ICC_CTLR);
isb();
 }
 
 static inline void gic_write_grpen1(u32 val)
 {
-   asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+   write_sysreg(val, ICC_IGRPEN1);
isb();
 }
 
 static inline void gic_write_sgi1r(u64 val)
 {
-   asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+   write_sysreg(val, ICC_SGI1R);
 }
 
 static inline u32 gic_read_sre(void)
 {
-   u32 val;
-
-   asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
-   return val;
+   return read_sysreg(ICC_SRE);
 }
 
 static inline void gic_write_sre(u32 val)
 {
-   asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+   write_sysreg(val, ICC_SRE);
isb();
 }
 
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f1152..f661732 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -47,6 +47,21 @@
 #define vectors_high() (0)
 #endif
 
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)  \
+   "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+   "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({   \
+   t __val;\
+   asm volatile(r " " c : "=r" (__val));   \
+   __val;  \
+})
+#define read_sysreg(...)   __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t)  asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)   __write_sysreg(v, __VA_ARGS__)
+
 #ifdef CONFIG_CPU_CP15
 
 extern unsigned long cr_alignment; /* defined in entry-armv.S */
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28..e604ad68 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)  \
-   "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
-   "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
 #define __ACCESS_VFP(CRn)  \
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
 
-#define __write_sysreg(v, r, w, c, t)  asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)   __write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({   \
-   t __val;\
-   asm volatile(r " " c : "=r" (__val));   \
-   __val;  \
-})
-#define read_sysreg(...)   __read_sysreg(__VA_ARGS__)
-
 #define write_special(v, r)\

[PATCH v2 3/7] KVM: arm: vgic-new: improve compatibility with 32-bit

2016-08-16 Thread Vladimir Murzin
We have a couple of 64-bit registers defined in the GICv3 architecture, so
"unsigned long" style accessors would not work for 32-bit. However,
these registers cannot be accessed as 64-bit in one go if we run a 32-bit
host, simply because KVM does not support multiple load/store on MMIO
space.

This means that a 32-bit guest accesses these registers in 32-bit chunks,
so the only thing we need to do is ensure that extract_bytes() always
takes 64-bit data.

While we are here, fix a couple of other width-related issues by using
ULL variants over UL.

Signed-off-by: Vladimir Murzin 
---
 virt/kvm/arm/vgic/vgic-mmio-v3.c |6 +++---
 virt/kvm/arm/vgic/vgic-mmio.h|2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index ff668e0..cc20b60 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -23,7 +23,7 @@
 #include "vgic-mmio.h"
 
 /* extract @num bytes at @offset bytes offset in data */
-unsigned long extract_bytes(unsigned long data, unsigned int offset,
+unsigned long extract_bytes(u64 data, unsigned int offset,
unsigned int num)
 {
return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
@@ -179,7 +179,7 @@ static unsigned long vgic_mmio_read_v3r_typer(struct 
kvm_vcpu *vcpu,
int target_vcpu_id = vcpu->vcpu_id;
u64 value;
 
-   value = (mpidr & GENMASK(23, 0)) << 32;
+   value = (mpidr & GENMASK_ULL(23, 0)) << 32;
value |= ((target_vcpu_id & 0x) << 8);
if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
value |= GICR_TYPER_LAST;
@@ -603,7 +603,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
bool broadcast;
 
sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
-   broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+   broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> 
ICC_SGI1R_TARGET_LIST_SHIFT;
mpidr = SGI_AFFINITY_LEVEL(reg, 3);
mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 0b3ecf9..80f92ce 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -96,7 +96,7 @@ unsigned long vgic_data_mmio_bus_to_host(const void *val, 
unsigned int len);
 void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
unsigned long data);
 
-unsigned long extract_bytes(unsigned long data, unsigned int offset,
+unsigned long extract_bytes(u64 data, unsigned int offset,
unsigned int num);
 
 u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
-- 
1.7.9.5

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 2/7] arm64: KVM: Move vgic-v3 save/restore to virt/kvm/arm/hyp

2016-08-16 Thread Vladimir Murzin
So we can reuse the code under arch/arm

Signed-off-by: Vladimir Murzin 
---
 arch/arm64/kvm/hyp/Makefile   |2 +-
 {arch/arm64/kvm => virt/kvm/arm}/hyp/vgic-v3-sr.c |0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename {arch/arm64/kvm => virt/kvm/arm}/hyp/vgic-v3-sr.c (100%)

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 0c85feb..aaf42ae 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -5,9 +5,9 @@
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
 
-obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
similarity index 100%
rename from arch/arm64/kvm/hyp/vgic-v3-sr.c
rename to virt/kvm/arm/hyp/vgic-v3-sr.c
-- 
1.7.9.5

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 1/7] arm64: KVM: Move GIC accessors to arch_gicv3.h

2016-08-16 Thread Vladimir Murzin
Since we are going to share the vgic-v3 save/restore code with ARM,
keep the arch-specific accessors separate.

Signed-off-by: Vladimir Murzin 
---
 arch/arm64/include/asm/arch_gicv3.h |   13 +
 arch/arm64/kvm/hyp/vgic-v3-sr.c |   13 -
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/arch_gicv3.h 
b/arch/arm64/include/asm/arch_gicv3.h
index 8ec88e5..ae7dbd7 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -79,6 +79,19 @@
 #include 
 #include 
 
+#define read_gicreg(r) \
+   ({  \
+   u64 reg;\
+   asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+   reg;\
+   })
+
+#define write_gicreg(v,r)  \
+   do {\
+   u64 __val = (v);\
+   asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+   } while (0)
+
 /*
  * Low-level accessors
  *
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5f8f80b..f2dbd2e 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -24,19 +24,6 @@
 #define vtr_to_max_lr_idx(v)   ((v) & 0xf)
 #define vtr_to_nr_pri_bits(v)  (((u32)(v) >> 29) + 1)
 
-#define read_gicreg(r) \
-   ({  \
-   u64 reg;\
-   asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
-   reg;\
-   })
-
-#define write_gicreg(v,r)  \
-   do {\
-   u64 __val = (v);\
-   asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
-   } while (0)
-
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
switch (lr & 0xf) {
-- 
1.7.9.5

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v2 0/7] ARM: KVM: Support for vgic-v3

2016-08-16 Thread Vladimir Murzin
Hi,

This is an attempt to make use of vgic-v3 under arch/arm, since the
save/restore functionality has been re-written in C and can be shared
between arm/arm64, as has already been done for vgic-v2 and the timer.

With these patches I'm able to get a 32-core ARMv7 guest to boot:

...
GICv3: CPU31: found redistributor 703 region 0:0x3ffd
CPU31: thread -1, cpu 3, socket 7, mpidr 8703
Brought up 32 CPUs
SMP: Total of 32 processors activated (768.00 BogoMIPS).
CPU: All CPU(s) started in SVC mode.
...

Additionally, quite lightweight test based on Self IPI guest test[1]
has been run with up to 255 cpus.

[1] http://www.spinics.net/lists/kvm/msg128974.html

Changelog:

v1 -> v2
   - rebased on v4.8-rc2
   - introduced guard for ITS code

Thanks!

Vladimir Murzin (7):
  arm64: KVM: Move GIC accessors to arch_gicv3.h
  arm64: KVM: Move vgic-v3 save/restore to virt/kvm/arm/hyp
  KVM: arm: vgic-new: improve compatibility with 32-bit
  ARM: update MPIDR accessors macro
  ARM: move system register accessors to asm/cp15.h
  ARM: KVM: Get ready to use vgic-v3
  ARM: KVM: Unlock vgic-v3 support

 arch/arm/include/asm/arch_gicv3.h |   91 +
 arch/arm/include/asm/cp15.h   |   15 
 arch/arm/include/asm/cputype.h|3 +-
 arch/arm/include/asm/kvm_asm.h|3 +
 arch/arm/include/asm/kvm_host.h   |4 +
 arch/arm/include/asm/kvm_hyp.h|   20 ++---
 arch/arm/include/uapi/asm/kvm.h   |7 ++
 arch/arm/kvm/Makefile |2 +
 arch/arm/kvm/coproc.c |   36 
 arch/arm/kvm/hyp/Makefile |1 +
 arch/arm/kvm/hyp/switch.c |   20 -
 arch/arm64/include/asm/arch_gicv3.h   |   13 +++
 arch/arm64/kvm/Kconfig|4 +-
 arch/arm64/kvm/hyp/Makefile   |2 +-
 include/kvm/arm_vgic.h|8 --
 {arch/arm64/kvm => virt/kvm/arm}/hyp/vgic-v3-sr.c |   13 ---
 virt/kvm/arm/vgic/vgic-kvm-device.c   |   10 +--
 virt/kvm/arm/vgic/vgic-mmio-v3.c  |8 +-
 virt/kvm/arm/vgic/vgic-mmio.c |2 -
 virt/kvm/arm/vgic/vgic-mmio.h |5 +-
 virt/kvm/arm/vgic/vgic.h  |   54 +---
 21 files changed, 195 insertions(+), 126 deletions(-)
 rename {arch/arm64/kvm => virt/kvm/arm}/hyp/vgic-v3-sr.c (96%)

-- 
1.7.9.5

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm