Re: [PATCH V7 10/10] arm/arm64: KVM: add guest SEA support

2017-01-16 Thread Baicar, Tyler

Hello Marc,


On 1/16/2017 4:58 AM, Marc Zyngier wrote:

Hi Tyler,

On 12/01/17 18:15, Tyler Baicar wrote:

Currently external aborts are unsupported by the guest abort
handling. Add handling for SEAs so that the host kernel reports
SEAs which occur in the guest kernel.

Signed-off-by: Tyler Baicar 
---
  arch/arm/include/asm/kvm_arm.h   |  1 +
  arch/arm/include/asm/system_misc.h   |  5 +
  arch/arm/kvm/mmu.c   | 18 --
  arch/arm64/include/asm/kvm_arm.h |  1 +
  arch/arm64/include/asm/system_misc.h |  2 ++
  arch/arm64/mm/fault.c| 13 +
  6 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089f..33a77509 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -187,6 +187,7 @@
  #define FSC_FAULT (0x04)
  #define FSC_ACCESS(0x08)
  #define FSC_PERM  (0x0c)
+#define FSC_EXTABT (0x10)
  
  /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */

  #define HPFAR_MASK(~0xf)
diff --git a/arch/arm/include/asm/system_misc.h 
b/arch/arm/include/asm/system_misc.h
index a3d61ad..ea45d94 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -24,4 +24,9 @@ extern unsigned int user_debug;
  
  #endif /* !__ASSEMBLY__ */
  
+static inline int handle_guest_sea(unsigned long addr, unsigned int esr)

+{
+   return -1;
+}
+
  #endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index e9a5c0e..1152966 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -29,6 +29,7 @@
  #include 
  #include 
  #include 
+#include 
  
  #include "trace.h"
  
@@ -1441,8 +1442,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
  
  	/* Check the stage-2 fault is trans. fault or write fault */

fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
-   if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-   fault_status != FSC_ACCESS) {
+
+   /* The host kernel will handle the synchronous external abort. There
+* is no need to pass the error into the guest.
+*/
+   if (fault_status == FSC_EXTABT) {
+   if(handle_guest_sea((unsigned long)fault_ipa,
+   kvm_vcpu_get_hsr(vcpu))) {
+   kvm_err("Failed to handle guest SEA, FSC: EC=%#x xFSC=%#lx 
ESR_EL2=%#lx\n",
+   kvm_vcpu_trap_get_class(vcpu),
+   (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
+   (unsigned long)kvm_vcpu_get_hsr(vcpu));

So there's one thing I don't like here, which is that we just gave the
guest a very nice way to pollute the host's kernel log with spurious
messages. So I'd rather make it silent, or at the very least rate limited.
Before this patch, if a guest exits with FSC_EXTABT, then the below print for
"Unsupported FSC..." would happen. So this print isn't really adding any noise
that isn't already there. Also, this print would only happen if
handle_guest_sea fails. If you still think this print should be removed then I
will remove it though.

+   return -EFAULT;
+   }
+   } else if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+  fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4b5c977..be0efb6 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -201,6 +201,7 @@
  #define FSC_FAULT ESR_ELx_FSC_FAULT
  #define FSC_ACCESSESR_ELx_FSC_ACCESS
  #define FSC_PERM  ESR_ELx_FSC_PERM
+#define FSC_EXTABT ESR_ELx_FSC_EXTABT
  
  /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */

  #define HPFAR_MASK(~UL(0xf))
diff --git a/arch/arm64/include/asm/system_misc.h 
b/arch/arm64/include/asm/system_misc.h
index e7f3440..27816cb 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -77,4 +77,6 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, 
const char *cmd);
  int register_sea_notifier(struct notifier_block *nb);
  void unregister_sea_notifier(struct notifier_block *nb);
  
+int handle_guest_sea(unsigned long addr, unsigned int esr);

+
  #endif/* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 81039c7..fa8d4d7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -597,6 +597,19 @@ static const char *fault_name(unsigned int esr)
  }
  
  /*

+ * Handle Synchronous External Aborts that occur in a guest kernel.
+ */
+int handle_guest_sea(unsigned long addr, 

Re: [PATCH V7 04/10] arm64: exception: handle Synchronous External Abort

2017-01-16 Thread Baicar, Tyler

Hello Will,


On 1/16/2017 4:53 AM, Will Deacon wrote:

On Thu, Jan 12, 2017 at 11:15:18AM -0700, Tyler Baicar wrote:

SEA exceptions are often caused by an uncorrected hardware
error, and are handled when data abort and instruction abort
exception classes have specific values for their Fault Status
Code.
When SEA occurs, before killing the process, go through
the handlers registered in the notification list.
Update fault_info[] with specific SEA faults so that the
new SEA handler is used.

Signed-off-by: Tyler Baicar 
Signed-off-by: Jonathan (Zhixiong) Zhang 
Signed-off-by: Naveen Kaje 
---
  arch/arm64/include/asm/system_misc.h | 13 
  arch/arm64/mm/fault.c| 58 +---
  2 files changed, 61 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/system_misc.h 
b/arch/arm64/include/asm/system_misc.h
index 57f110b..e7f3440 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -64,4 +64,17 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, 
const char *cmd);
  
  #endif	/* __ASSEMBLY__ */
  
+/*

+ * The functions below are used to register and unregister callbacks
+ * that are to be invoked when a Synchronous External Abort (SEA)
+ * occurs. An SEA is raised by certain fault status codes that have
+ * either data or instruction abort as the exception class, and
+ * callbacks may be registered to parse or handle such hardware errors.
+ *
+ * Registered callbacks are run in an interrupt/atomic context. They
+ * are not allowed to block or sleep.
+ */
+int register_sea_notifier(struct notifier_block *nb);
+void unregister_sea_notifier(struct notifier_block *nb);

I still don't understand why you need notifiers for this. You register
precisely one hook in the series.
I didn't see a response to my last comment on the previous series so I just
left it in for this series. The notifier usage is consistent with the GHES
code for SCI errors, where the notifier is likewise only used a single time
in the code. If you think making the call directly is a better option I will
remove the notifiers.
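
For context, the only registration this series needs is on the GHES side, and it
boils down to something like the sketch below (the callback name and body are
illustrative assumptions; only register_sea_notifier()/unregister_sea_notifier()
and struct notifier_block come from the series and core kernel):

   static int ghes_sea_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
   {
           /* runs in atomic context: parse/queue the error record, no sleeping */
           return NOTIFY_OK;
   }

   static struct notifier_block ghes_sea_nb = {
           .notifier_call = ghes_sea_notify,
   };

   /* at driver init / exit */
   register_sea_notifier(&ghes_sea_nb);
   unregister_sea_notifier(&ghes_sea_nb);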

  #endif/* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 05d2bd7..81039c7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -39,6 +39,22 @@
  #include 
  #include 
  
+/*

+ * GHES SEA handler code may register a notifier call here to
+ * handle HW error record passed from platform.
+ */
+static ATOMIC_NOTIFIER_HEAD(sea_handler_chain);
+
+int register_sea_notifier(struct notifier_block *nb)
+{
+   return atomic_notifier_chain_register(&sea_handler_chain, nb);
+}
+
+void unregister_sea_notifier(struct notifier_block *nb)
+{
+   atomic_notifier_chain_unregister(&sea_handler_chain, nb);
+}
+
  static const char *fault_name(unsigned int esr);
  
  #ifdef CONFIG_KPROBES

@@ -480,6 +496,28 @@ static int do_bad(unsigned long addr, unsigned int esr, 
struct pt_regs *regs)
return 1;
  }
  
+/*

+ * This abort handler deals with Synchronous External Abort.
+ * It calls notifiers, and then returns "fault".
+ */
+static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+{
+   struct siginfo info;
+
+   atomic_notifier_call_chain(&sea_handler_chain, 0, NULL);
+
+   pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
+fault_name(esr), esr, addr);
+
+   info.si_signo = SIGBUS;
+   info.si_errno = 0;
+   info.si_code  = 0;
+   info.si_addr  = (void __user *)addr;
+   arm64_notify_die("", regs, , esr);
+
+   return 0;
+}
+
  static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs 
*regs);
int sig;
@@ -502,22 +540,22 @@ static const struct fault_info {
{ do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 1 permission 
fault"},
{ do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 2 permission 
fault"},
{ do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 3 permission 
fault"},
-   { do_bad,   SIGBUS,  0, "synchronous external 
abort"  },
+   { do_sea,   SIGBUS,  0, "synchronous external 
abort"  },
{ do_bad,   SIGBUS,  0, "unknown 17"
  },
{ do_bad,   SIGBUS,  0, "unknown 18"
  },
{ do_bad,   SIGBUS,  0, "unknown 19"
  },
-   { do_bad,   SIGBUS,  0, "synchronous abort 
(translation table walk)" },
-   { do_bad,   SIGBUS,  0, "synchronous abort 
(translation table walk)" },
-   { do_bad,   SIGBUS,  0, "synchronous abort 
(translation table walk)" },
-   { do_bad,   SIGBUS,  0, "synchronous abort 
(translation 

Re: [PATCH v3 2/5] arm64: Work around Falkor erratum 1003

2017-01-16 Thread Christopher Covington
Hi Mark,

On 01/11/2017 01:45 PM, Mark Rutland wrote:
> On Wed, Jan 11, 2017 at 12:40:42PM -0600, Timur Tabi wrote:
>> On 01/11/2017 12:37 PM, Mark Rutland wrote:
>>> The name, as it is, is perfectly descriptive.
>>>
>>> Let's not sacrifice legibility over a non-issue.
>>
>> I don't want to kick a dead horse or anything, but changing it to
>> QCOM_FLKR_ERRATUM_1003 would eliminate all the spacing problems
>> without sacrificing anything.
> 
> The CPU is called "Falkor", not "FLKR", and we're not coming up with an
> ACPI table name...
> 
> The ARM Ltd. erratum numbers are global to all parts, so we don't
> include the part name. Is the 1003 erratum number specific to Falkor?
>
> If it's global, you could use QCOM_ERRATUM_1003 instead.

E1003 is specific to Falkor, and hopefully just its first major revision.
Qualcomm Technology's first/previous generation ARMv8 custom
microarchitecture used errata numbers below 1000. I am not aware of
global coordination in the numbering, unfortunately.

> Otherwise, QCOM_FALKOR_ERRATUM_1003 is preferable.

Thanks,
Cov

-- 
Qualcomm Datacenter Technologies, Inc. as an affiliate of Qualcomm
Technologies, Inc. Qualcomm Technologies, Inc. is a member of the Code
Aurora Forum, a Linux Foundation Collaborative Project.


Re: [PATCH 2/3] KVM: arm64: Access CNTHCTL_EL2 bit fields correctly on VHE systems

2017-01-16 Thread Suzuki K Poulose

On 16/01/17 14:11, Marc Zyngier wrote:

On 16/01/17 13:30, Marc Zyngier wrote:

On 13/01/17 14:56, Suzuki K Poulose wrote:

On 13/01/17 13:30, Marc Zyngier wrote:

[+ Suzuki, who wrote the whole cpus_have_const_cap thing]



[...]


But maybe we should have have some stronger guarantees that we'll
always get things inlined, and that the "const" side is enforced:


Agreed.



diff --git a/arch/arm64/include/asm/cpufeature.h 
b/arch/arm64/include/asm/cpufeature.h
index b4989df..4710469 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -105,10 +105,11 @@ static inline bool cpu_have_feature(unsigned int num)
 }

 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)


I think we should have the above change and make it inline always.


 {
-   if (num >= ARM64_NCAPS)
-   return false;
+   BUILD_BUG_ON(!__builtin_constant_p(num));


This is not needed, as the compilation would fail if num is not a constant with
static key code.


I also just checked this, and it doesn't fail if the compiler doesn't
directly supports jump labels (we then fallback to the static key being
a standard memory access).


Ah, I missed that part of the story. Sorry about that. Please go ahead with the
changes. I had a similar check in my first version and it was dropped later after
a similar review comment. We hadn't considered older toolchains.
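
For reference, the version being converged on in this thread would look roughly
like this (a sketch: the two build-time checks are the proposal above, and the
static-key body is assumed to stay as in the existing arm64 helper):

   static __always_inline bool cpus_have_const_cap(int num)
   {
           /* reject non-constant or out-of-range cap numbers at build time */
           BUILD_BUG_ON(!__builtin_constant_p(num));
           BUILD_BUG_ON(num >= ARM64_NCAPS);
           return static_branch_unlikely(&cpu_hwcap_keys[num]);
   }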


Suzuki


Re: [PATCH 2/3] KVM: arm64: Access CNTHCTL_EL2 bit fields correctly on VHE systems

2017-01-16 Thread Marc Zyngier
On 16/01/17 13:30, Marc Zyngier wrote:
> On 13/01/17 14:56, Suzuki K Poulose wrote:
>> On 13/01/17 13:30, Marc Zyngier wrote:
>>> [+ Suzuki, who wrote the whole cpus_have_const_cap thing]
>>>
> 
> [...]
> 
>>> But maybe we should have have some stronger guarantees that we'll
>>> always get things inlined, and that the "const" side is enforced:
>>
>> Agreed.
>>
>>>
>>> diff --git a/arch/arm64/include/asm/cpufeature.h 
>>> b/arch/arm64/include/asm/cpufeature.h
>>> index b4989df..4710469 100644
>>> --- a/arch/arm64/include/asm/cpufeature.h
>>> +++ b/arch/arm64/include/asm/cpufeature.h
>>> @@ -105,10 +105,11 @@ static inline bool cpu_have_feature(unsigned int num)
>>>  }
>>>
>>>  /* System capability check for constant caps */
>>> -static inline bool cpus_have_const_cap(int num)
>>> +static __always_inline bool cpus_have_const_cap(int num)
>>
>> I think we should have the above change and make it inline always.
>>
>>>  {
>>> -   if (num >= ARM64_NCAPS)
>>> -   return false;
>>> +   BUILD_BUG_ON(!__builtin_constant_p(num));
>>
>> This is not needed, as the compilation would fail if num is not a constant 
>> with
>> static key code.

I also just checked this, and it doesn't fail if the compiler doesn't
directly supports jump labels (we then fallback to the static key being
a standard memory access).

Thanks,

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH 2/3] KVM: arm64: Access CNTHCTL_EL2 bit fields correctly on VHE systems

2017-01-16 Thread Marc Zyngier
On 13/01/17 14:56, Suzuki K Poulose wrote:
> On 13/01/17 13:30, Marc Zyngier wrote:
>> [+ Suzuki, who wrote the whole cpus_have_const_cap thing]
>>

[...]

>> But maybe we should have have some stronger guarantees that we'll
>> always get things inlined, and that the "const" side is enforced:
> 
> Agreed.
> 
>>
>> diff --git a/arch/arm64/include/asm/cpufeature.h 
>> b/arch/arm64/include/asm/cpufeature.h
>> index b4989df..4710469 100644
>> --- a/arch/arm64/include/asm/cpufeature.h
>> +++ b/arch/arm64/include/asm/cpufeature.h
>> @@ -105,10 +105,11 @@ static inline bool cpu_have_feature(unsigned int num)
>>  }
>>
>>  /* System capability check for constant caps */
>> -static inline bool cpus_have_const_cap(int num)
>> +static __always_inline bool cpus_have_const_cap(int num)
> 
> I think we should have the above change and make it inline always.
> 
>>  {
>> -if (num >= ARM64_NCAPS)
>> -return false;
>> +BUILD_BUG_ON(!__builtin_constant_p(num));
> 
> This is not needed, as the compilation would fail if num is not a constant 
> with
> static key code.
> 
>> +BUILD_BUG_ON(num >= ARM64_NCAPS);
>> +
> 
> Also, I think it would be good to return false for caps > the ARM64_NCAPS, in 
> sync
> with the non-const version.

But what's the semantic? It means we're accessing a capability that
doesn't exist, which looks like a major bug in my book. Is there any
valid use case for this?

Thanks,

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH V7 10/10] arm/arm64: KVM: add guest SEA support

2017-01-16 Thread Marc Zyngier
Hi Tyler,

On 12/01/17 18:15, Tyler Baicar wrote:
> Currently external aborts are unsupported by the guest abort
> handling. Add handling for SEAs so that the host kernel reports
> SEAs which occur in the guest kernel.
> 
> Signed-off-by: Tyler Baicar 
> ---
>  arch/arm/include/asm/kvm_arm.h   |  1 +
>  arch/arm/include/asm/system_misc.h   |  5 +
>  arch/arm/kvm/mmu.c   | 18 --
>  arch/arm64/include/asm/kvm_arm.h |  1 +
>  arch/arm64/include/asm/system_misc.h |  2 ++
>  arch/arm64/mm/fault.c| 13 +
>  6 files changed, 38 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
> index e22089f..33a77509 100644
> --- a/arch/arm/include/asm/kvm_arm.h
> +++ b/arch/arm/include/asm/kvm_arm.h
> @@ -187,6 +187,7 @@
>  #define FSC_FAULT(0x04)
>  #define FSC_ACCESS   (0x08)
>  #define FSC_PERM (0x0c)
> +#define FSC_EXTABT   (0x10)
>  
>  /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
>  #define HPFAR_MASK   (~0xf)
> diff --git a/arch/arm/include/asm/system_misc.h 
> b/arch/arm/include/asm/system_misc.h
> index a3d61ad..ea45d94 100644
> --- a/arch/arm/include/asm/system_misc.h
> +++ b/arch/arm/include/asm/system_misc.h
> @@ -24,4 +24,9 @@ extern unsigned int user_debug;
>  
>  #endif /* !__ASSEMBLY__ */
>  
> +static inline int handle_guest_sea(unsigned long addr, unsigned int esr)
> +{
> + return -1;
> +}
> +
>  #endif /* __ASM_ARM_SYSTEM_MISC_H */
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index e9a5c0e..1152966 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -29,6 +29,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #include "trace.h"
>  
> @@ -1441,8 +1442,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, 
> struct kvm_run *run)
>  
>   /* Check the stage-2 fault is trans. fault or write fault */
>   fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
> - if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
> - fault_status != FSC_ACCESS) {
> +
> + /* The host kernel will handle the synchronous external abort. There
> +  * is no need to pass the error into the guest.
> +  */
> + if (fault_status == FSC_EXTABT) {
> + if(handle_guest_sea((unsigned long)fault_ipa,
> + kvm_vcpu_get_hsr(vcpu))) {
> + kvm_err("Failed to handle guest SEA, FSC: EC=%#x 
> xFSC=%#lx ESR_EL2=%#lx\n",
> + kvm_vcpu_trap_get_class(vcpu),
> + (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
> + (unsigned long)kvm_vcpu_get_hsr(vcpu));

So there's one thing I don't like here, which is that we just gave the
guest a very nice way to pollute the host's kernel log with spurious
messages. So I'd rather make it silent, or at the very least rate limited.
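
If rate limiting is the route taken, a minimal variant would be the sketch below
(assumes pr_err_ratelimited() is acceptable in this path in place of kvm_err()):

   if (handle_guest_sea((unsigned long)fault_ipa, kvm_vcpu_get_hsr(vcpu))) {
           /* rate limited so a misbehaving guest cannot flood the host log */
           pr_err_ratelimited("kvm: failed to handle guest SEA, ESR_EL2=%#lx\n",
                              (unsigned long)kvm_vcpu_get_hsr(vcpu));
           return -EFAULT;
   }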

> + return -EFAULT;
> + }
> + } else if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
> +fault_status != FSC_ACCESS) {
>   kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
>   kvm_vcpu_trap_get_class(vcpu),
>   (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
> diff --git a/arch/arm64/include/asm/kvm_arm.h 
> b/arch/arm64/include/asm/kvm_arm.h
> index 4b5c977..be0efb6 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -201,6 +201,7 @@
>  #define FSC_FAULTESR_ELx_FSC_FAULT
>  #define FSC_ACCESS   ESR_ELx_FSC_ACCESS
>  #define FSC_PERM ESR_ELx_FSC_PERM
> +#define FSC_EXTABT   ESR_ELx_FSC_EXTABT
>  
>  /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
>  #define HPFAR_MASK   (~UL(0xf))
> diff --git a/arch/arm64/include/asm/system_misc.h 
> b/arch/arm64/include/asm/system_misc.h
> index e7f3440..27816cb 100644
> --- a/arch/arm64/include/asm/system_misc.h
> +++ b/arch/arm64/include/asm/system_misc.h
> @@ -77,4 +77,6 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, 
> const char *cmd);
>  int register_sea_notifier(struct notifier_block *nb);
>  void unregister_sea_notifier(struct notifier_block *nb);
>  
> +int handle_guest_sea(unsigned long addr, unsigned int esr);
> +
>  #endif   /* __ASM_SYSTEM_MISC_H */
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index 81039c7..fa8d4d7 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -597,6 +597,19 @@ static const char *fault_name(unsigned int esr)
>  }
>  
>  /*
> + * Handle Synchronous External Aborts that occur in a guest kernel.
> + */
> +int handle_guest_sea(unsigned long addr, unsigned int esr)
> +{
> + atomic_notifier_call_chain(&sea_handler_chain, 0, NULL);
> +
> + pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
> + fault_name(esr), esr, addr);

Same here.

> +
> + return 0;
> +}
> +
> 

Re: [PATCH V7 04/10] arm64: exception: handle Synchronous External Abort

2017-01-16 Thread Will Deacon
On Thu, Jan 12, 2017 at 11:15:18AM -0700, Tyler Baicar wrote:
> SEA exceptions are often caused by an uncorrected hardware
> error, and are handled when data abort and instruction abort
> exception classes have specific values for their Fault Status
> Code.
> When SEA occurs, before killing the process, go through
> the handlers registered in the notification list.
> Update fault_info[] with specific SEA faults so that the
> new SEA handler is used.
> 
> Signed-off-by: Tyler Baicar 
> Signed-off-by: Jonathan (Zhixiong) Zhang 
> Signed-off-by: Naveen Kaje 
> ---
>  arch/arm64/include/asm/system_misc.h | 13 
>  arch/arm64/mm/fault.c| 58 
> +---
>  2 files changed, 61 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/system_misc.h 
> b/arch/arm64/include/asm/system_misc.h
> index 57f110b..e7f3440 100644
> --- a/arch/arm64/include/asm/system_misc.h
> +++ b/arch/arm64/include/asm/system_misc.h
> @@ -64,4 +64,17 @@ extern void (*arm_pm_restart)(enum reboot_mode 
> reboot_mode, const char *cmd);
>  
>  #endif   /* __ASSEMBLY__ */
>  
> +/*
> + * The functions below are used to register and unregister callbacks
> + * that are to be invoked when a Synchronous External Abort (SEA)
> + * occurs. An SEA is raised by certain fault status codes that have
> + * either data or instruction abort as the exception class, and
> + * callbacks may be registered to parse or handle such hardware errors.
> + *
> + * Registered callbacks are run in an interrupt/atomic context. They
> + * are not allowed to block or sleep.
> + */
> +int register_sea_notifier(struct notifier_block *nb);
> +void unregister_sea_notifier(struct notifier_block *nb);

I still don't understand why you need notifiers for this. You register
precisely one hook in the series.

>  #endif   /* __ASM_SYSTEM_MISC_H */
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index 05d2bd7..81039c7 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -39,6 +39,22 @@
>  #include 
>  #include 
>  
> +/*
> + * GHES SEA handler code may register a notifier call here to
> + * handle HW error record passed from platform.
> + */
> +static ATOMIC_NOTIFIER_HEAD(sea_handler_chain);
> +
> +int register_sea_notifier(struct notifier_block *nb)
> +{
> + return atomic_notifier_chain_register(&sea_handler_chain, nb);
> +}
> +
> +void unregister_sea_notifier(struct notifier_block *nb)
> +{
> + atomic_notifier_chain_unregister(&sea_handler_chain, nb);
> +}
> +
>  static const char *fault_name(unsigned int esr);
>  
>  #ifdef CONFIG_KPROBES
> @@ -480,6 +496,28 @@ static int do_bad(unsigned long addr, unsigned int esr, 
> struct pt_regs *regs)
>   return 1;
>  }
>  
> +/*
> + * This abort handler deals with Synchronous External Abort.
> + * It calls notifiers, and then returns "fault".
> + */
> +static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
> +{
> + struct siginfo info;
> +
> + atomic_notifier_call_chain(&sea_handler_chain, 0, NULL);
> +
> + pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
> +  fault_name(esr), esr, addr);
> +
> + info.si_signo = SIGBUS;
> + info.si_errno = 0;
> + info.si_code  = 0;
> + info.si_addr  = (void __user *)addr;
> + arm64_notify_die("", regs, , esr);
> +
> + return 0;
> +}
> +
>  static const struct fault_info {
>   int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs 
> *regs);
>   int sig;
> @@ -502,22 +540,22 @@ static const struct fault_info {
>   { do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 1 permission 
> fault"  },
>   { do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 2 permission 
> fault"  },
>   { do_page_fault,SIGSEGV, SEGV_ACCERR,   "level 3 permission 
> fault"  },
> - { do_bad,   SIGBUS,  0, "synchronous external 
> abort"},
> + { do_sea,   SIGBUS,  0, "synchronous external 
> abort"},
>   { do_bad,   SIGBUS,  0, "unknown 17"
> },
>   { do_bad,   SIGBUS,  0, "unknown 18"
> },
>   { do_bad,   SIGBUS,  0, "unknown 19"
> },
> - { do_bad,   SIGBUS,  0, "synchronous abort 
> (translation table walk)" },
> - { do_bad,   SIGBUS,  0, "synchronous abort 
> (translation table walk)" },
> - { do_bad,   SIGBUS,  0, "synchronous abort 
> (translation table walk)" },
> - { do_bad,   SIGBUS,  0, "synchronous abort 
> (translation table walk)" },
> - { do_bad,   SIGBUS,  0, "synchronous parity 
> error"  },
> + { do_sea,   SIGBUS,  0, "level 0 

Re: [Qemu-devel] [PATCH RFC 0/6] target-arm: KVM64: Cross type vCPU support

2017-01-16 Thread no-reply
Hi,

Your series seems to have some coding style problems. See output below for
more information:

Type: series
Message-id: 1484558821-15512-1-git-send-email-zhaoshengl...@huawei.com
Subject: [Qemu-devel] [PATCH RFC 0/6] target-arm: KVM64: Cross type vCPU support

=== TEST SCRIPT BEGIN ===
#!/bin/bash

BASE=base
n=1
total=$(git log --oneline $BASE.. | wc -l)
failed=0

# Useful git options
git config --local diff.renamelimit 0
git config --local diff.renames True

commits="$(git log --format=%H --reverse $BASE..)"
for c in $commits; do
echo "Checking PATCH $n/$total: $(git log -n 1 --format=%s $c)..."
if ! git show $c --format=email | ./scripts/checkpatch.pl --mailback -; then
failed=1
echo
fi
n=$((n+1))
done

exit $failed
=== TEST SCRIPT END ===

Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
From https://github.com/patchew-project/qemu
 * [new tag] 
patchew/1484558821-15512-1-git-send-email-zhaoshengl...@huawei.com -> 
patchew/1484558821-15512-1-git-send-email-zhaoshengl...@huawei.com
Switched to a new branch 'test'
ad5e87b target-arm: cpu64: Add support for Cortex-A72
d4f6c86 arm: virt: Enable generic type CPU in virt machine
db2426e target: arm: Add a generic type cpu
042dff8 arm: kvm64: Check if kvm supports cross type vCPU
d8b8db1 target: arm: Add the qemu target for KVM_ARM_TARGET_GENERIC_V8
71eb07d headers: update linux headers

=== OUTPUT BEGIN ===
Checking PATCH 1/6: headers: update linux headers...
Checking PATCH 2/6: target: arm: Add the qemu target for 
KVM_ARM_TARGET_GENERIC_V8...
Checking PATCH 3/6: arm: kvm64: Check if kvm supports cross type vCPU...
ERROR: Macros with complex values should be enclosed in parenthesis
#21: FILE: target/arm/kvm64.c:484:
+#define ARM_CPU_ID_MIDR3, 0, 0, 0, 0

ERROR: Macros with complex values should be enclosed in parenthesis
#24: FILE: target/arm/kvm64.c:487:
+#define ARM_CPU_ID_REVIDR  3, 0, 0, 0, 6

ERROR: Macros with complex values should be enclosed in parenthesis
#25: FILE: target/arm/kvm64.c:488:
+#define ARM_CPU_ID_AIDR3, 1, 0, 0, 7

ERROR: Macros with complex values should be enclosed in parenthesis
#28: FILE: target/arm/kvm64.c:491:
+#define ARM_CPU_ID_CCSIDR  3, 1, 0, 0, 0

ERROR: Macros with complex values should be enclosed in parenthesis
#29: FILE: target/arm/kvm64.c:492:
+#define ARM_CPU_ID_CLIDR   3, 1, 0, 0, 1

ERROR: Macros with complex values should be enclosed in parenthesis
#30: FILE: target/arm/kvm64.c:493:
+#define ARM_CPU_ID_CSSELR  3, 2, 0, 0, 0

ERROR: Macros with complex values should be enclosed in parenthesis
#31: FILE: target/arm/kvm64.c:494:
+#define ARM_CPU_ID_CTR 3, 3, 0, 0, 1

ERROR: Macros with complex values should be enclosed in parenthesis
#34: FILE: target/arm/kvm64.c:497:
+#define ARM_CPU_ID_PFR03, 0, 0, 1, 0

ERROR: Macros with complex values should be enclosed in parenthesis
#35: FILE: target/arm/kvm64.c:498:
+#define ARM_CPU_ID_PFR13, 0, 0, 1, 1

ERROR: Macros with complex values should be enclosed in parenthesis
#36: FILE: target/arm/kvm64.c:499:
+#define ARM_CPU_ID_DFR03, 0, 0, 1, 2

ERROR: Macros with complex values should be enclosed in parenthesis
#37: FILE: target/arm/kvm64.c:500:
+#define ARM_CPU_ID_AFR03, 0, 0, 1, 3

ERROR: Macros with complex values should be enclosed in parenthesis
#38: FILE: target/arm/kvm64.c:501:
+#define ARM_CPU_ID_MMFR0   3, 0, 0, 1, 4

ERROR: Macros with complex values should be enclosed in parenthesis
#39: FILE: target/arm/kvm64.c:502:
+#define ARM_CPU_ID_MMFR1   3, 0, 0, 1, 5

ERROR: Macros with complex values should be enclosed in parenthesis
#40: FILE: target/arm/kvm64.c:503:
+#define ARM_CPU_ID_MMFR2   3, 0, 0, 1, 6

ERROR: Macros with complex values should be enclosed in parenthesis
#41: FILE: target/arm/kvm64.c:504:
+#define ARM_CPU_ID_MMFR3   3, 0, 0, 1, 7

ERROR: Macros with complex values should be enclosed in parenthesis
#42: FILE: target/arm/kvm64.c:505:
+#define ARM_CPU_ID_ISAR0   3, 0, 0, 2, 0

ERROR: Macros with complex values should be enclosed in parenthesis
#43: FILE: target/arm/kvm64.c:506:
+#define ARM_CPU_ID_ISAR1   3, 0, 0, 2, 1

ERROR: Macros with complex values should be enclosed in parenthesis
#44: FILE: target/arm/kvm64.c:507:
+#define ARM_CPU_ID_ISAR2   3, 0, 0, 2, 2

ERROR: Macros with complex values should be enclosed in parenthesis
#45: FILE: target/arm/kvm64.c:508:
+#define ARM_CPU_ID_ISAR3   3, 0, 0, 2, 3

ERROR: Macros with complex values should be enclosed in parenthesis
#46: FILE: target/arm/kvm64.c:509:
+#define ARM_CPU_ID_ISAR4   3, 0, 0, 2, 4

ERROR: Macros with complex values should be enclosed in parenthesis
#47: FILE: target/arm/kvm64.c:510:
+#define ARM_CPU_ID_ISAR5   3, 0, 0, 2, 5

ERROR: Macros with complex values should be enclosed in parenthesis
#48: FILE: target/arm/kvm64.c:511:
+#define ARM_CPU_ID_MMFR4   3, 0, 0, 2, 6

ERROR: Macros with complex values should be enclosed in 

[PATCH RFC 0/7] ARM64: KVM: Cross type vCPU support

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

This patch set adds support for cross type vCPUs in KVM-ARM64. It allows
userspace to request a vCPU type different from the physical one and to
check whether the physical CPUs can support that specific vCPU. If so,
KVM will trap the ID registers and present the guest with the values from
userspace.

This patch set is not complete since CPU errata are not considered
and currently it only checks whether the id_aa64mmfr0_el1 register value is
legal. I want this as an example and need some feedback from folks on whether
this approach is right or proper.

You can test this patch set with QEMU using
-cpu cortex-a53/cortex-a57/generic/cortex-a72
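
On the KVM API side, a cross type vCPU is requested by setting the new feature
bit at vCPU init time, roughly as in this sketch (standard KVM_ARM_VCPU_INIT
flow; headers, vcpu_fd setup and error handling elided):

   struct kvm_vcpu_init init = { 0 };

   /* generic target plus the cross-vCPU feature bit added in patch 5 */
   init.target = KVM_ARM_TARGET_GENERIC_V8;
   init.features[KVM_ARM_VCPU_CROSS / 32] |= 1u << (KVM_ARM_VCPU_CROSS % 32);

   if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
           /* handle error */;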

These patches can be fetched from:
https://git.linaro.org/people/shannon.zhao/linux-mainline.git cross_vcpu_rfc

The corresponding QEMU patches can be fetched from:
https://git.linaro.org/people/shannon.zhao/qemu.git cross_vcpu_rfc

Thanks,
Shannon

Shannon Zhao (7):
  ARM64: KVM: Add the definition of ID registers
  ARM64: KVM: Add reset handlers for all ID registers
  ARM64: KVM: Reset ID registers when creating the VCPUs
  ARM64: KVM: emulate accessing ID registers
  ARM64: KVM: Support cross type vCPU
  ARM64: KVM: Support heterogeneous system
  ARM64: KVM: Add user set handler for id_aa64mmfr0_el1

 arch/arm/kvm/arm.c   |  36 -
 arch/arm64/include/asm/kvm_coproc.h  |   1 +
 arch/arm64/include/asm/kvm_emulate.h |   3 +
 arch/arm64/include/asm/kvm_host.h|  49 +-
 arch/arm64/include/uapi/asm/kvm.h|   1 +
 arch/arm64/kvm/guest.c   |  18 ++-
 arch/arm64/kvm/hyp/sysreg-sr.c   |   2 +
 arch/arm64/kvm/sys_regs.c| 290 +++
 include/uapi/linux/kvm.h |   2 +
 9 files changed, 296 insertions(+), 106 deletions(-)

-- 
2.0.4




[PATCH RFC 3/7] ARM64: KVM: Reset ID registers when creating the VCPUs

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Reset ID registers when creating the VCPUs and store the values per
VCPU. Also modify the get_invariant_sys_reg and set_invariant_sys_reg
to get/set the ID register from vcpu context.

Signed-off-by: Shannon Zhao 
---
 arch/arm64/include/asm/kvm_coproc.h |  1 +
 arch/arm64/kvm/guest.c  |  1 +
 arch/arm64/kvm/sys_regs.c   | 58 ++---
 3 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0b52377..0801b66 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -24,6 +24,7 @@
 #include 
 
 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
+void kvm_reset_id_sys_regs(struct kvm_vcpu *vcpu);
 
 struct kvm_sys_reg_table {
const struct sys_reg_desc *table;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index b37446a..92abe2b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -48,6 +48,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+   kvm_reset_id_sys_regs(vcpu);
return 0;
 }
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index bf71eb4..7c5fa03 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1440,11 +1440,11 @@ static const struct sys_reg_desc cp15_64_regs[] = {
  * the guest, or a future kvm may trap them.
  */
 
-#define FUNCTION_INVARIANT(reg)
\
-   static void get_##reg(struct kvm_vcpu *v,   \
- const struct sys_reg_desc *r) \
+#define FUNCTION_INVARIANT(register)   \
+   static void get_##register(struct kvm_vcpu *v,  \
+  const struct sys_reg_desc *r)\
{   \
-   ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
+   vcpu_id_sys_reg(v, r->reg) = read_sysreg(register); \
}
 
 FUNCTION_INVARIANT(midr_el1)
@@ -1480,7 +1480,6 @@ FUNCTION_INVARIANT(id_aa64mmfr1_el1)
 FUNCTION_INVARIANT(clidr_el1)
 FUNCTION_INVARIANT(aidr_el1)
 
-/* ->val is filled in by kvm_sys_reg_table_init() */
 static struct sys_reg_desc invariant_sys_regs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b000),
  NULL, get_midr_el1, MIDR_EL1 },
@@ -1952,43 +1951,43 @@ static int reg_to_user(void __user *uaddr, const u64 
*val, u64 id)
return 0;
 }
 
-static int get_invariant_sys_reg(u64 id, void __user *uaddr)
+static int get_invariant_sys_reg(struct kvm_vcpu *vcpu,
+const struct kvm_one_reg *reg)
 {
struct sys_reg_params params;
const struct sys_reg_desc *r;
+   void __user *uaddr = (void __user *)(unsigned long)reg->addr;
 
-   if (!index_to_params(id, &params))
+   if (!index_to_params(reg->id, &params))
return -ENOENT;
 
r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
 
-   return reg_to_user(uaddr, &r->val, id);
+   if (r->get_user)
+   return (r->get_user)(vcpu, r, reg, uaddr);
+
+   return reg_to_user(uaddr, &vcpu_id_sys_reg(vcpu, r->reg), reg->id);
 }
 
-static int set_invariant_sys_reg(u64 id, void __user *uaddr)
+static int set_invariant_sys_reg(struct kvm_vcpu *vcpu,
+const struct kvm_one_reg *reg)
 {
struct sys_reg_params params;
const struct sys_reg_desc *r;
-   int err;
-   u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+   void __user *uaddr = (void __user *)(unsigned long)reg->addr;
 
-   if (!index_to_params(id, &params))
+   if (!index_to_params(reg->id, &params))
return -ENOENT;
r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;

-   err = reg_from_user(&val, uaddr, id);
-   if (err)
-   return err;
-
-   /* This is what we mean by invariant: you can't change it. */
-   if (r->val != val)
-   return -EINVAL;
+   if (r->set_user)
+   return (r->set_user)(vcpu, r, reg, uaddr);
 
-   return 0;
+   return reg_from_user(&vcpu_id_sys_reg(vcpu, r->reg), uaddr, reg->id);
 }
 
 static bool is_valid_cache(u32 val)
@@ -2086,7 +2085,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const 
struct kvm_one_reg *reg
 
r = index_to_sys_reg_desc(vcpu, reg->id);
if (!r)
-   return get_invariant_sys_reg(reg->id, uaddr);
+   return get_invariant_sys_reg(vcpu, reg);
 
if (r->get_user)
return (r->get_user)(vcpu, r, reg, uaddr);
@@ -2107,7 +2106,7 @@ int 

[PATCH RFC 6/7] ARM64: KVM: Support heterogeneous system

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

When initializing KVM, check whether physical hardware is a
heterogeneous system through the MIDR values. If so, force userspace to
set the KVM_ARM_VCPU_CROSS feature bit. Otherwise, it should fail to
initialize VCPUs.

Signed-off-by: Shannon Zhao 
---
 arch/arm/kvm/arm.c   | 26 ++
 include/uapi/linux/kvm.h |  1 +
 2 files changed, 27 insertions(+)
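
Userspace would probe this before choosing a vCPU model, along the lines of the
sketch below (standard KVM_CHECK_EXTENSION usage; kvm_fd is assumed to be the
/dev/kvm descriptor):

   if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_HETEROGENEOUS) == 1) {
           /* host CPUs differ: KVM_ARM_VCPU_CROSS must be set at vCPU init */
   }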

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index bdceb19..21ec070 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -46,6 +46,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension   virt");
@@ -65,6 +66,7 @@ static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
+static bool heterogeneous_system;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 
@@ -210,6 +212,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_CROSS_VCPU:
r = 1;
break;
+   case KVM_CAP_ARM_HETEROGENEOUS:
+   r = heterogeneous_system;
+   break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
@@ -812,6 +817,12 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
int phys_target = kvm_target_cpu();
bool cross_vcpu = kvm_vcpu_has_feature_cross_cpu(init);
 
+   if (heterogeneous_system && !cross_vcpu) {
+   kvm_err("%s:Host is a heterogeneous system, set 
KVM_ARM_VCPU_CROSS bit\n",
+   __func__);
+   return -EINVAL;
+   }
+
if (!cross_vcpu && init->target != phys_target)
return -EINVAL;
 
@@ -1397,6 +1408,11 @@ static void check_kvm_target_cpu(void *ret)
*(int *)ret = kvm_target_cpu();
 }
 
+static void get_physical_cpu_midr(void *midr)
+{
+   *(u32 *)midr = read_cpuid_id();
+}
+
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 {
struct kvm_vcpu *vcpu;
@@ -1417,6 +1433,7 @@ int kvm_arch_init(void *opaque)
 {
int err;
int ret, cpu;
+   u32 current_midr, midr;
 
if (!is_hyp_mode_available()) {
kvm_err("HYP mode not available\n");
@@ -1431,6 +1448,15 @@ int kvm_arch_init(void *opaque)
}
}
 
+   current_midr = read_cpuid_id();
+   for_each_online_cpu(cpu) {
+   smp_call_function_single(cpu, get_physical_cpu_midr, &midr, 1);
+   if (current_midr != midr) {
+   heterogeneous_system = true;
+   break;
+   }
+   }
+
err = init_common_resources();
if (err)
return err;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 46115a2..cc2b63d 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -872,6 +872,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_MSI_DEVID 131
 #define KVM_CAP_PPC_HTM 132
 #define KVM_CAP_ARM_CROSS_VCPU 133
+#define KVM_CAP_ARM_HETEROGENEOUS 134
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
-- 
2.0.4




[PATCH RFC 1/7] ARM64: KVM: Add the definition of ID registers

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Add a new member in kvm_cpu_context to save the ID register values.

Signed-off-by: Shannon Zhao 
---
 arch/arm64/include/asm/kvm_host.h | 46 +++
 1 file changed, 46 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index e505038..6034f92 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -187,12 +187,57 @@ enum vcpu_sysreg {
 
 #define NR_COPRO_REGS  (NR_SYS_REGS * 2)
 
+enum id_vcpu_sysreg {
+   MIDR_EL1,
+   /* ID group 1 registers */
+   REVIDR_EL1,
+   AIDR_EL1,
+
+   /* ID group 2 registers */
+   CTR_EL0,
+   CCSIDR_EL1,
+   CLIDR_EL1,
+
+   /* ID group 3 registers */
+   ID_PFR0_EL1,
+   ID_PFR1_EL1,
+   ID_DFR0_EL1,
+   ID_AFR0_EL1,
+   ID_MMFR0_EL1,
+   ID_MMFR1_EL1,
+   ID_MMFR2_EL1,
+   ID_MMFR3_EL1,
+   ID_ISAR0_EL1,
+   ID_ISAR1_EL1,
+   ID_ISAR2_EL1,
+   ID_ISAR3_EL1,
+   ID_ISAR4_EL1,
+   ID_ISAR5_EL1,
+   MVFR0_EL1,
+   MVFR1_EL1,
+   MVFR2_EL1,
+   ID_AA64PFR0_EL1,
+   ID_AA64PFR1_EL1,
+   ID_AA64DFR0_EL1,
+   ID_AA64DFR1_EL1,
+   ID_AA64ISAR0_EL1,
+   ID_AA64ISAR1_EL1,
+   ID_AA64MMFR0_EL1,
+   ID_AA64MMFR1_EL1,
+   ID_AA64AFR0_EL1,
+   ID_AA64AFR1_EL1,
+   ID_MMFR4_EL1,
+
+   NR_ID_SYS_REGS
+};
+
 struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
u64 sys_regs[NR_SYS_REGS];
u32 copro[NR_COPRO_REGS];
};
+   u64 id_sys_regs[NR_ID_SYS_REGS];
 };
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -277,6 +322,7 @@ struct kvm_vcpu_arch {
 
 #define vcpu_gp_regs(v)(&(v)->arch.ctxt.gp_regs)
 #define vcpu_sys_reg(v,r)  ((v)->arch.ctxt.sys_regs[(r)])
+#define vcpu_id_sys_reg(v,r)   ((v)->arch.ctxt.id_sys_regs[(r)])
 /*
  * CP14 and CP15 live in the same array, as they are backed by the
  * same system registers.
-- 
2.0.4




[PATCH RFC 5/7] ARM64: KVM: Support cross type vCPU

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Add a capability to tell userspace that KVM supports cross type vCPUs.
Add a CPU feature for userspace to set when it doesn't use a host type
vCPU, and make kvm_vcpu_preferred_target return the host MIDR register value
so that userspace can check whether its requested vCPU type matches the
physical CPU; if so, KVM will not trap the ID registers even though
userspace doesn't specify -cpu host.
The guest accesses MIDR through VPIDR_EL2, so we save/restore it whether or
not it's a cross type vCPU.

Signed-off-by: Shannon Zhao 
---
 arch/arm/kvm/arm.c   | 10 --
 arch/arm64/include/asm/kvm_emulate.h |  3 +++
 arch/arm64/include/asm/kvm_host.h|  3 ++-
 arch/arm64/include/uapi/asm/kvm.h|  1 +
 arch/arm64/kvm/guest.c   | 17 -
 arch/arm64/kvm/hyp/sysreg-sr.c   |  2 ++
 include/uapi/linux/kvm.h |  1 +
 7 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 1167678..bdceb19 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -207,6 +207,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PSCI_0_2:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
+   case KVM_CAP_ARM_CROSS_VCPU:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -809,8 +810,9 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 {
unsigned int i;
int phys_target = kvm_target_cpu();
+   bool cross_vcpu = kvm_vcpu_has_feature_cross_cpu(init);
 
-   if (init->target != phys_target)
+   if (!cross_vcpu && init->target != phys_target)
return -EINVAL;
 
/*
@@ -839,7 +841,11 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
set_bit(i, vcpu->arch.features);
}
 
-   vcpu->arch.target = phys_target;
+   if (!cross_vcpu)
+   vcpu->arch.target = phys_target;
+   else
+   /* Use generic ARMv8 target for cross type vcpu. */
+   vcpu->arch.target = KVM_ARM_TARGET_GENERIC_V8;
 
/* Now we know what it is, we can reset it. */
return kvm_reset_vcpu(vcpu);
diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba..bca7d3a 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -49,6 +49,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_E2H;
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
+   if (test_bit(KVM_ARM_VCPU_CROSS, vcpu->arch.features))
+   /* TODO: Set HCR_TID2 and trap cache registers */
+   vcpu->arch.hcr_el2 |= HCR_TID3 | HCR_TID1 | HCR_TID0;
 }
 
 static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 6034f92..d0073d7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -41,10 +41,11 @@
 
 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
 
-#define KVM_VCPU_MAX_FEATURES 4
+#define KVM_VCPU_MAX_FEATURES 5
 
 #define KVM_REQ_VCPU_EXIT  8
 
+bool kvm_vcpu_has_feature_cross_cpu(const struct kvm_vcpu_init *init);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
diff --git a/arch/arm64/include/uapi/asm/kvm.h 
b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..7ba7117 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -97,6 +97,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2  2 /* CPU uses PSCI v0.2 */
 #define KVM_ARM_VCPU_PMU_V33 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_CROSS 4 /* Support cross type vCPU */
 
 struct kvm_vcpu_init {
__u32 target;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 92abe2b..4a5ccab 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -308,8 +308,15 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_GENERIC_V8;
 }
 
+bool kvm_vcpu_has_feature_cross_cpu(const struct kvm_vcpu_init *init)
+{
+   return init->features[KVM_ARM_VCPU_CROSS / 32] &
+  (1 << (KVM_ARM_VCPU_CROSS % 32));
+}
+
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
+   bool cross_vcpu = kvm_vcpu_has_feature_cross_cpu(init);
int target = kvm_target_cpu();
 
if (target < 0)
@@ -323,7 +330,15 @@ int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 * specific features available for the preferred
 * target type.
 */
-   init->target = (__u32)target;
+   /* If 

[PATCH RFC 7/7] ARM64: KVM: Add user set handler for id_aa64mmfr0_el1

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Check that the value userspace sets is compatible with the host's
id_aa64mmfr0_el1 before accepting it.

Signed-off-by: Shannon Zhao 
---
 arch/arm64/kvm/sys_regs.c | 32 +++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f613e29..9763b79 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1493,6 +1493,35 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
return true;
 }
 
+static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
+   const struct sys_reg_desc *rd,
+   const struct kvm_one_reg *reg,
+   void __user *uaddr)
+{
+   u64 val, id_aa64mmfr0;
+
+   if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+   return -EFAULT;
+
+   asm volatile("mrs %0, id_aa64mmfr0_el1\n" : "=r" (id_aa64mmfr0));
+
+   if ((val & GENMASK(3, 0)) > (id_aa64mmfr0 & GENMASK(3, 0)) ||
+   (val & GENMASK(7, 4)) > (id_aa64mmfr0 & GENMASK(7, 4)) ||
+   (val & GENMASK(11, 8)) > (id_aa64mmfr0 & GENMASK(11, 8)) ||
+   (val & GENMASK(15, 12)) > (id_aa64mmfr0 & GENMASK(15, 12)) ||
+   (val & GENMASK(19, 16)) > (id_aa64mmfr0 & GENMASK(19, 16)) ||
+   (val & GENMASK(23, 20)) > (id_aa64mmfr0 & GENMASK(23, 20)) ||
+   (val & GENMASK(27, 24)) < (id_aa64mmfr0 & GENMASK(27, 24)) ||
+   (val & GENMASK(31, 28)) < (id_aa64mmfr0 & GENMASK(31, 28))) {
+   kvm_err("Wrong memory translation granule size/Physical Address 
range\n");
+   return -EINVAL;
+   }
+
+   vcpu_id_sys_reg(vcpu, rd->reg) = val & GENMASK(31, 0);
+
+   return 0;
+}
+
 static struct sys_reg_desc invariant_sys_regs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b000),
  access_id_reg, get_midr_el1, MIDR_EL1 },
@@ -1549,7 +1578,8 @@ static struct sys_reg_desc invariant_sys_regs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0110), Op2(0b001),
  access_id_reg, get_id_aa64isar1_el1, ID_AA64ISAR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0111), Op2(0b000),
- access_id_reg, get_id_aa64mmfr0_el1, ID_AA64MMFR0_EL1 },
+ access_id_reg, get_id_aa64mmfr0_el1, ID_AA64MMFR0_EL1,
+ 0, NULL, set_id_aa64mmfr0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0111), Op2(0b001),
  access_id_reg, get_id_aa64mmfr1_el1, ID_AA64MMFR1_EL1 },
{ Op0(0b11), Op1(0b001), CRn(0b), CRm(0b), Op2(0b001),
-- 
2.0.4




[PATCH RFC 2/7] ARM64: KVM: Add reset handlers for all ID registers

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Move invariant_sys_regs before emulate_sys_reg so that it can be used
later.

Signed-off-by: Shannon Zhao 
---
 arch/arm64/kvm/sys_regs.c | 193 --
 1 file changed, 116 insertions(+), 77 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e66..bf71eb4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1432,6 +1432,122 @@ static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
 
+/*
+ * These are the invariant sys_reg registers: we let the guest see the
+ * host versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+
+#define FUNCTION_INVARIANT(reg)
\
+   static void get_##reg(struct kvm_vcpu *v,   \
+ const struct sys_reg_desc *r) \
+   {   \
+   ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
+   }
+
+FUNCTION_INVARIANT(midr_el1)
+FUNCTION_INVARIANT(ctr_el0)
+FUNCTION_INVARIANT(revidr_el1)
+FUNCTION_INVARIANT(id_pfr0_el1)
+FUNCTION_INVARIANT(id_pfr1_el1)
+FUNCTION_INVARIANT(id_dfr0_el1)
+FUNCTION_INVARIANT(id_afr0_el1)
+FUNCTION_INVARIANT(id_mmfr0_el1)
+FUNCTION_INVARIANT(id_mmfr1_el1)
+FUNCTION_INVARIANT(id_mmfr2_el1)
+FUNCTION_INVARIANT(id_mmfr3_el1)
+FUNCTION_INVARIANT(id_isar0_el1)
+FUNCTION_INVARIANT(id_isar1_el1)
+FUNCTION_INVARIANT(id_isar2_el1)
+FUNCTION_INVARIANT(id_isar3_el1)
+FUNCTION_INVARIANT(id_isar4_el1)
+FUNCTION_INVARIANT(id_isar5_el1)
+FUNCTION_INVARIANT(mvfr0_el1)
+FUNCTION_INVARIANT(mvfr1_el1)
+FUNCTION_INVARIANT(mvfr2_el1)
+FUNCTION_INVARIANT(id_aa64pfr0_el1)
+FUNCTION_INVARIANT(id_aa64pfr1_el1)
+FUNCTION_INVARIANT(id_aa64dfr0_el1)
+FUNCTION_INVARIANT(id_aa64dfr1_el1)
+FUNCTION_INVARIANT(id_aa64afr0_el1)
+FUNCTION_INVARIANT(id_aa64afr1_el1)
+FUNCTION_INVARIANT(id_aa64isar0_el1)
+FUNCTION_INVARIANT(id_aa64isar1_el1)
+FUNCTION_INVARIANT(id_aa64mmfr0_el1)
+FUNCTION_INVARIANT(id_aa64mmfr1_el1)
+FUNCTION_INVARIANT(clidr_el1)
+FUNCTION_INVARIANT(aidr_el1)
+
+/* ->val is filled in by kvm_sys_reg_table_init() */
+static struct sys_reg_desc invariant_sys_regs[] = {
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b000),
+ NULL, get_midr_el1, MIDR_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b110),
+ NULL, get_revidr_el1, REVIDR_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b000),
+ NULL, get_id_pfr0_el1, ID_PFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b001),
+ NULL, get_id_pfr1_el1, ID_PFR1_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b010),
+ NULL, get_id_dfr0_el1, ID_DFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b011),
+ NULL, get_id_afr0_el1, ID_AFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b100),
+ NULL, get_id_mmfr0_el1, ID_MMFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b101),
+ NULL, get_id_mmfr1_el1, ID_MMFR1_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b110),
+ NULL, get_id_mmfr2_el1, ID_MMFR2_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b111),
+ NULL, get_id_mmfr3_el1, ID_MMFR3_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b000),
+ NULL, get_id_isar0_el1, ID_ISAR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b001),
+ NULL, get_id_isar1_el1, ID_ISAR1_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b010),
+ NULL, get_id_isar2_el1, ID_ISAR2_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b011),
+ NULL, get_id_isar3_el1, ID_ISAR3_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b100),
+ NULL, get_id_isar4_el1, ID_ISAR4_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b101),
+ NULL, get_id_isar5_el1, ID_ISAR5_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b000),
+ NULL, get_mvfr0_el1, MVFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b001),
+ NULL, get_mvfr1_el1, MVFR1_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b010),
+ NULL, get_mvfr2_el1, MVFR2_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0100), Op2(0b000),
+ NULL, get_id_aa64pfr0_el1, ID_AA64PFR0_EL1 },
+   { Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0100), Op2(0b001),
+ NULL, get_id_aa64pfr1_el1, 

[PATCH RFC 4/7] ARM64: KVM: emulate accessing ID registers

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Signed-off-by: Shannon Zhao 
---
 arch/arm64/kvm/sys_regs.c | 83 ---
 1 file changed, 50 insertions(+), 33 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7c5fa03..f613e29 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1480,71 +1480,84 @@ FUNCTION_INVARIANT(id_aa64mmfr1_el1)
 FUNCTION_INVARIANT(clidr_el1)
 FUNCTION_INVARIANT(aidr_el1)
 
+static bool access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+   if (p->is_write) {
+   vcpu_id_sys_reg(vcpu, r->reg) = p->regval;
+   } else {
+   p->regval = vcpu_id_sys_reg(vcpu, r->reg);
+   }
+
+   return true;
+}
+
 static struct sys_reg_desc invariant_sys_regs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b000),
- NULL, get_midr_el1, MIDR_EL1 },
+ access_id_reg, get_midr_el1, MIDR_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b), Op2(0b110),
- NULL, get_revidr_el1, REVIDR_EL1 },
+ access_id_reg, get_revidr_el1, REVIDR_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b000),
- NULL, get_id_pfr0_el1, ID_PFR0_EL1 },
+ access_id_reg, get_id_pfr0_el1, ID_PFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b001),
- NULL, get_id_pfr1_el1, ID_PFR1_EL1 },
+ access_id_reg, get_id_pfr1_el1, ID_PFR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b010),
- NULL, get_id_dfr0_el1, ID_DFR0_EL1 },
+ access_id_reg, get_id_dfr0_el1, ID_DFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b011),
- NULL, get_id_afr0_el1, ID_AFR0_EL1 },
+ access_id_reg, get_id_afr0_el1, ID_AFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b100),
- NULL, get_id_mmfr0_el1, ID_MMFR0_EL1 },
+ access_id_reg, get_id_mmfr0_el1, ID_MMFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b101),
- NULL, get_id_mmfr1_el1, ID_MMFR1_EL1 },
+ access_id_reg, get_id_mmfr1_el1, ID_MMFR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b110),
- NULL, get_id_mmfr2_el1, ID_MMFR2_EL1 },
+ access_id_reg, get_id_mmfr2_el1, ID_MMFR2_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0001), Op2(0b111),
- NULL, get_id_mmfr3_el1, ID_MMFR3_EL1 },
+ access_id_reg, get_id_mmfr3_el1, ID_MMFR3_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b000),
- NULL, get_id_isar0_el1, ID_ISAR0_EL1 },
+ access_id_reg, get_id_isar0_el1, ID_ISAR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b001),
- NULL, get_id_isar1_el1, ID_ISAR1_EL1 },
+ access_id_reg, get_id_isar1_el1, ID_ISAR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b010),
- NULL, get_id_isar2_el1, ID_ISAR2_EL1 },
+ access_id_reg, get_id_isar2_el1, ID_ISAR2_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b011),
- NULL, get_id_isar3_el1, ID_ISAR3_EL1 },
+ access_id_reg, get_id_isar3_el1, ID_ISAR3_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b100),
- NULL, get_id_isar4_el1, ID_ISAR4_EL1 },
+ access_id_reg, get_id_isar4_el1, ID_ISAR4_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0010), Op2(0b101),
- NULL, get_id_isar5_el1, ID_ISAR5_EL1 },
+ access_id_reg, get_id_isar5_el1, ID_ISAR5_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b000),
- NULL, get_mvfr0_el1, MVFR0_EL1 },
+ access_id_reg, get_mvfr0_el1, MVFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b001),
- NULL, get_mvfr1_el1, MVFR1_EL1 },
+ access_id_reg, get_mvfr1_el1, MVFR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0011), Op2(0b010),
- NULL, get_mvfr2_el1, MVFR2_EL1 },
+ access_id_reg, get_mvfr2_el1, MVFR2_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0100), Op2(0b000),
- NULL, get_id_aa64pfr0_el1, ID_AA64PFR0_EL1 },
+ access_id_reg, get_id_aa64pfr0_el1, ID_AA64PFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0100), Op2(0b001),
- NULL, get_id_aa64pfr1_el1, ID_AA64PFR1_EL1 },
+ access_id_reg, get_id_aa64pfr1_el1, ID_AA64PFR1_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0101), Op2(0b000),
- NULL, get_id_aa64dfr0_el1, ID_AA64DFR0_EL1 },
+ access_id_reg, get_id_aa64dfr0_el1, ID_AA64DFR0_EL1 },
{ Op0(0b11), Op1(0b000), CRn(0b), CRm(0b0101), Op2(0b001),
- NULL, get_id_aa64dfr1_el1, ID_AA64DFR1_EL1 },
+ 

[PATCH RFC 3/6] arm: kvm64: Check if kvm supports cross type vCPU

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

If the user requests a specific vCPU type which is not the same as the
physical one, and KVM supports cross type vCPUs, we set the
KVM_ARM_VCPU_CROSS bit and set the CPU ID registers.

Signed-off-by: Shannon Zhao 
---
 target/arm/kvm64.c | 182 +
 1 file changed, 182 insertions(+)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 609..70442ea 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -481,7 +481,151 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
 return true;
 }
 
+#define ARM_CPU_ID_MIDR    3, 0, 0, 0, 0
 #define ARM_CPU_ID_MPIDR   3, 0, 0, 0, 5
+/* ID group 1 registers */
+#define ARM_CPU_ID_REVIDR  3, 0, 0, 0, 6
+#define ARM_CPU_ID_AIDR    3, 1, 0, 0, 7
+
+/* ID group 2 registers */
+#define ARM_CPU_ID_CCSIDR  3, 1, 0, 0, 0
+#define ARM_CPU_ID_CLIDR   3, 1, 0, 0, 1
+#define ARM_CPU_ID_CSSELR  3, 2, 0, 0, 0
+#define ARM_CPU_ID_CTR 3, 3, 0, 0, 1
+
+/* ID group 3 registers */
+#define ARM_CPU_ID_PFR0    3, 0, 0, 1, 0
+#define ARM_CPU_ID_PFR1    3, 0, 0, 1, 1
+#define ARM_CPU_ID_DFR0    3, 0, 0, 1, 2
+#define ARM_CPU_ID_AFR0    3, 0, 0, 1, 3
+#define ARM_CPU_ID_MMFR0   3, 0, 0, 1, 4
+#define ARM_CPU_ID_MMFR1   3, 0, 0, 1, 5
+#define ARM_CPU_ID_MMFR2   3, 0, 0, 1, 6
+#define ARM_CPU_ID_MMFR3   3, 0, 0, 1, 7
+#define ARM_CPU_ID_ISAR0   3, 0, 0, 2, 0
+#define ARM_CPU_ID_ISAR1   3, 0, 0, 2, 1
+#define ARM_CPU_ID_ISAR2   3, 0, 0, 2, 2
+#define ARM_CPU_ID_ISAR3   3, 0, 0, 2, 3
+#define ARM_CPU_ID_ISAR4   3, 0, 0, 2, 4
+#define ARM_CPU_ID_ISAR5   3, 0, 0, 2, 5
+#define ARM_CPU_ID_MMFR4   3, 0, 0, 2, 6
+#define ARM_CPU_ID_MVFR0   3, 0, 0, 3, 0
+#define ARM_CPU_ID_MVFR1   3, 0, 0, 3, 1
+#define ARM_CPU_ID_MVFR2   3, 0, 0, 3, 2
+#define ARM_CPU_ID_AA64PFR0    3, 0, 0, 4, 0
+#define ARM_CPU_ID_AA64PFR1    3, 0, 0, 4, 1
+#define ARM_CPU_ID_AA64DFR0    3, 0, 0, 5, 0
+#define ARM_CPU_ID_AA64DFR1    3, 0, 0, 5, 1
+#define ARM_CPU_ID_AA64AFR0    3, 0, 0, 5, 4
+#define ARM_CPU_ID_AA64AFR1    3, 0, 0, 5, 5
+#define ARM_CPU_ID_AA64ISAR0   3, 0, 0, 6, 0
+#define ARM_CPU_ID_AA64ISAR1   3, 0, 0, 6, 1
+#define ARM_CPU_ID_AA64MMFR0   3, 0, 0, 7, 0
+#define ARM_CPU_ID_AA64MMFR1   3, 0, 0, 7, 1
+#define ARM_CPU_ID_MAX 36
+
+static int kvm_arm_set_id_registers(CPUState *cs)
+{
+int ret = 0;
+uint32_t i;
+ARMCPU *cpu = ARM_CPU(cs);
+struct kvm_one_reg id_regitsers[ARM_CPU_ID_MAX];
+
+memset(id_regitsers, 0, ARM_CPU_ID_MAX * sizeof(struct kvm_one_reg));
+
+id_regitsers[0].id = ARM64_SYS_REG(ARM_CPU_ID_MIDR);
+id_regitsers[0].addr = (uintptr_t)&cpu->midr;
+
+id_regitsers[1].id = ARM64_SYS_REG(ARM_CPU_ID_REVIDR);
+id_regitsers[1].addr = (uintptr_t)&cpu->revidr;
+
+id_regitsers[2].id = ARM64_SYS_REG(ARM_CPU_ID_MVFR0);
+id_regitsers[2].addr = (uintptr_t)&cpu->mvfr0;
+
+id_regitsers[3].id = ARM64_SYS_REG(ARM_CPU_ID_MVFR1);
+id_regitsers[3].addr = (uintptr_t)&cpu->mvfr1;
+
+id_regitsers[4].id = ARM64_SYS_REG(ARM_CPU_ID_MVFR2);
+id_regitsers[4].addr = (uintptr_t)&cpu->mvfr2;
+
+id_regitsers[5].id = ARM64_SYS_REG(ARM_CPU_ID_PFR0);
+id_regitsers[5].addr = (uintptr_t)&cpu->id_pfr0;
+
+id_regitsers[6].id = ARM64_SYS_REG(ARM_CPU_ID_PFR1);
+id_regitsers[6].addr = (uintptr_t)&cpu->id_pfr1;
+
+id_regitsers[7].id = ARM64_SYS_REG(ARM_CPU_ID_DFR0);
+id_regitsers[7].addr = (uintptr_t)&cpu->id_dfr0;
+
+id_regitsers[8].id = ARM64_SYS_REG(ARM_CPU_ID_AFR0);
+id_regitsers[8].addr = (uintptr_t)&cpu->id_afr0;
+
+id_regitsers[9].id = ARM64_SYS_REG(ARM_CPU_ID_MMFR0);
+id_regitsers[9].addr = (uintptr_t)&cpu->id_mmfr0;
+
+id_regitsers[10].id = ARM64_SYS_REG(ARM_CPU_ID_MMFR1);
+id_regitsers[10].addr = (uintptr_t)&cpu->id_mmfr1;
+
+id_regitsers[11].id = ARM64_SYS_REG(ARM_CPU_ID_MMFR2);
+id_regitsers[11].addr = (uintptr_t)&cpu->id_mmfr2;
+
+id_regitsers[12].id = ARM64_SYS_REG(ARM_CPU_ID_MMFR3);
+id_regitsers[12].addr = (uintptr_t)&cpu->id_mmfr3;
+
+id_regitsers[13].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR0);
+id_regitsers[13].addr = (uintptr_t)&cpu->id_isar0;
+
+id_regitsers[14].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR1);
+id_regitsers[14].addr = (uintptr_t)&cpu->id_isar1;
+
+id_regitsers[15].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR2);
+id_regitsers[15].addr = (uintptr_t)&cpu->id_isar2;
+
+id_regitsers[16].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR3);
+id_regitsers[16].addr = (uintptr_t)&cpu->id_isar3;
+
+id_regitsers[17].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR4);
+id_regitsers[17].addr = (uintptr_t)&cpu->id_isar4;
+
+id_regitsers[18].id = ARM64_SYS_REG(ARM_CPU_ID_ISAR5);
+id_regitsers[18].addr = (uintptr_t)&cpu->id_isar5;
+
+id_regitsers[19].id = ARM64_SYS_REG(ARM_CPU_ID_AA64PFR0);
+id_regitsers[19].addr = (uintptr_t)&cpu->id_aa64pfr0;
+
+id_regitsers[20].id = ARM64_SYS_REG(ARM_CPU_ID_AA64DFR0);
+id_regitsers[20].addr = (uintptr_t)&cpu->id_aa64dfr0;

[PATCH RFC 5/6] arm: virt: Enable generic type CPU in virt machine

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Signed-off-by: Shannon Zhao 
---
 hw/arm/virt.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 4b301c2..49b7b65 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -166,6 +166,7 @@ static const char *valid_cpus[] = {
 "cortex-a15",
 "cortex-a53",
 "cortex-a57",
+"generic",
 "host",
 NULL
 };
-- 
2.0.4




[PATCH RFC 0/6] target-arm: KVM64: Cross type vCPU support

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

This patch set adds support for cross-type vCPUs when using KVM on ARM,
along with two new CPU types: generic and cortex-a72.

You can test this patch set with QEMU using
-cpu cortex-a53/cortex-a57/generic/cortex-a72
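
For example, an illustrative invocation (the machine, memory, kernel and disk
arguments below are placeholders, not part of this series; only the -cpu
values come from these patches):

qemu-system-aarch64 -enable-kvm -machine virt -cpu generic -nographic \
    -m 1024 -smp 2 -kernel Image \
    -append "console=ttyAMA0 root=/dev/vda rw" \
    -drive file=rootfs.img,format=raw,if=none,id=hd0 \
    -device virtio-blk-device,drive=hd0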

These patches can be fetched from:
https://git.linaro.org/people/shannon.zhao/qemu.git cross_vcpu_rfc

The corresponding KVM patches can be fetched from:
https://git.linaro.org/people/shannon.zhao/linux-mainline.git cross_vcpu_rfc

Shannon Zhao (6):
  headers: update linux headers
  target: arm: Add the qemu target for KVM_ARM_TARGET_GENERIC_V8
  arm: kvm64: Check if kvm supports cross type vCPU
  target: arm: Add a generic type cpu
  arm: virt: Enable generic type CPU in virt machine
  target-arm: cpu64: Add support for Cortex-A72

 hw/arm/virt.c |   2 +
 linux-headers/asm-arm64/kvm.h |   1 +
 linux-headers/linux/kvm.h |   2 +
 target/arm/cpu64.c| 110 +
 target/arm/kvm-consts.h   |   2 +
 target/arm/kvm64.c| 182 ++
 6 files changed, 299 insertions(+)

-- 
2.0.4




[PATCH RFC 2/6] target: arm: Add the qemu target for KVM_ARM_TARGET_GENERIC_V8

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Signed-off-by: Shannon Zhao 
---
 target/arm/kvm-consts.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h
index a2c9518..fc01ac5 100644
--- a/target/arm/kvm-consts.h
+++ b/target/arm/kvm-consts.h
@@ -128,6 +128,7 @@ MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED)
 #define QEMU_KVM_ARM_TARGET_CORTEX_A57 2
 #define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3
 #define QEMU_KVM_ARM_TARGET_CORTEX_A53 4
+#define QEMU_KVM_ARM_TARGET_GENERIC_V8 5
 
 /* There's no kernel define for this: sentinel value which
  * matches no KVM target value for either 64 or 32 bit
@@ -140,6 +141,7 @@ MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8)
 MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57)
 MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA)
 MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_GENERIC_V8, KVM_ARM_TARGET_GENERIC_V8)
 #else
 MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15)
 MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7)
-- 
2.0.4




[PATCH RFC 6/6] target-arm: cpu64: Add support for Cortex-A72

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Add the ARM Cortex-A72 processor definition. It is similar to the Cortex-A57.

Signed-off-by: Shannon Zhao 
---
 hw/arm/virt.c  |  1 +
 target/arm/cpu64.c | 56 ++
 2 files changed, 57 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 49b7b65..2ba93e3 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -166,6 +166,7 @@ static const char *valid_cpus[] = {
 "cortex-a15",
 "cortex-a53",
 "cortex-a57",
+"cortex-a72",
 "generic",
 "host",
 NULL
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 223f31e..4f00ceb 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -204,6 +204,61 @@ static void aarch64_a53_initfn(Object *obj)
 define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
 }
 
+static void aarch64_a72_initfn(Object *obj)
+{
+ARMCPU *cpu = ARM_CPU(obj);
+
+cpu->dtb_compatible = "arm,cortex-a72";
+set_feature(&cpu->env, ARM_FEATURE_V8);
+set_feature(&cpu->env, ARM_FEATURE_VFP4);
+set_feature(&cpu->env, ARM_FEATURE_NEON);
+set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+set_feature(&cpu->env, ARM_FEATURE_CRC);
+set_feature(&cpu->env, ARM_FEATURE_EL3);
+cpu->kvm_target = QEMU_KVM_ARM_TARGET_GENERIC_V8;
+cpu->midr = 0x410fd081;
+cpu->revidr = 0x;
+cpu->reset_fpsid = 0x41034080;
+cpu->mvfr0 = 0x10110222;
+cpu->mvfr1 = 0x1211;
+cpu->mvfr2 = 0x0043;
+cpu->ctr = 0x8444c004;
+cpu->reset_sctlr = 0x00c50838;
+cpu->id_pfr0 = 0x0131;
+cpu->id_pfr1 = 0x00011011;
+cpu->id_dfr0 = 0x03010066;
+cpu->id_afr0 = 0x;
+cpu->id_mmfr0 = 0x10201105;
+cpu->id_mmfr1 = 0x4000;
+cpu->id_mmfr2 = 0x0126;
+cpu->id_mmfr3 = 0x02102211;
+cpu->id_isar0 = 0x02101110;
+cpu->id_isar1 = 0x13112111;
+cpu->id_isar2 = 0x21232042;
+cpu->id_isar3 = 0x01112131;
+cpu->id_isar4 = 0x00011142;
+cpu->id_isar5 = 0x00011121;
+cpu->id_aa64pfr0 = 0x;
+cpu->id_aa64dfr0 = 0x10305106;
+cpu->pmceid0 = 0x;
+cpu->pmceid1 = 0x;
+cpu->id_aa64isar0 = 0x00011120;
+cpu->id_aa64mmfr0 = 0x1124;
+cpu->dbgdidr = 0x3516d000;
+cpu->clidr = 0x0a200023;
+cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+cpu->ccsidr[2] = 0x71ffe07a; /* 4096KB L2 cache */
+cpu->dcz_blocksize = 4; /* 64 bytes */
+define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+}
+
 static void aarch64_generic_initfn(Object *obj)
 {
 ARMCPU *cpu = ARM_CPU(obj);
@@ -285,6 +340,7 @@ typedef struct ARMCPUInfo {
 static const ARMCPUInfo aarch64_cpus[] = {
 { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
 { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
 { .name = "generic",.initfn = aarch64_generic_initfn },
 #ifdef CONFIG_USER_ONLY
 { .name = "any", .initfn = aarch64_any_initfn },
-- 
2.0.4




[PATCH RFC 1/6] headers: update linux headers

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Signed-off-by: Shannon Zhao 
---
 linux-headers/asm-arm64/kvm.h | 1 +
 linux-headers/linux/kvm.h | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index fd5a276..f914eac 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -97,6 +97,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2  2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_CROSS 4 /* Support cross type vCPU */
 
 struct kvm_vcpu_init {
__u32 target;
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index bb0ed71..ea9e288 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -870,6 +870,8 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_S390_USER_INSTR0 130
 #define KVM_CAP_MSI_DEVID 131
 #define KVM_CAP_PPC_HTM 132
+#define KVM_CAP_ARM_CROSS_VCPU 133
+#define KVM_CAP_ARM_HETEROGENEOUS 134
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
-- 
2.0.4




[PATCH RFC 4/6] target: arm: Add a generic type cpu

2017-01-16 Thread Shannon Zhao
From: Shannon Zhao 

Add a generic CPU type; it is useful for migration when running on
different hardware.

Signed-off-by: Shannon Zhao 
---
 target/arm/cpu64.c | 54 ++
 1 file changed, 54 insertions(+)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 549cb1e..223f31e 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -204,6 +204,59 @@ static void aarch64_a53_initfn(Object *obj)
 define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
 }
 
+static void aarch64_generic_initfn(Object *obj)
+{
+ARMCPU *cpu = ARM_CPU(obj);
+
+cpu->dtb_compatible = "arm,armv8";
+set_feature(&cpu->env, ARM_FEATURE_V8);
+set_feature(&cpu->env, ARM_FEATURE_VFP4);
+set_feature(&cpu->env, ARM_FEATURE_NEON);
+set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+set_feature(&cpu->env, ARM_FEATURE_CRC);
+set_feature(&cpu->env, ARM_FEATURE_EL3);
+cpu->kvm_target = QEMU_KVM_ARM_TARGET_GENERIC_V8;
+cpu->midr = 0x410fd000; /* FIXME: this needs to adjust */
+cpu->revidr = 0x;
+cpu->reset_fpsid = 0x41034070;
+cpu->mvfr0 = 0x10110222;
+cpu->mvfr1 = 0x1211;
+cpu->mvfr2 = 0x0043;
+cpu->ctr = 0x84448004; /* L1Ip = VIPT */
+cpu->reset_sctlr = 0x00c50838;
+cpu->id_pfr0 = 0x0131;
+cpu->id_pfr1 = 0x00011011;
+cpu->id_dfr0 = 0x03010066;
+cpu->id_afr0 = 0x;
+cpu->id_mmfr0 = 0x10101105;
+cpu->id_mmfr1 = 0x4000;
+cpu->id_mmfr2 = 0x0126;
+cpu->id_mmfr3 = 0x02102211;
+cpu->id_isar0 = 0x02101110;
+cpu->id_isar1 = 0x13112111;
+cpu->id_isar2 = 0x21232042;
+cpu->id_isar3 = 0x01112131;
+cpu->id_isar4 = 0x00011142;
+cpu->id_isar5 = 0x00011121;
+cpu->id_aa64pfr0 = 0x;
+cpu->id_aa64dfr0 = 0x10305106;
+cpu->id_aa64isar0 = 0x00011120;
+cpu->id_aa64mmfr0 = 0x0f001101; /* only support 4k page, 36 bit physical addr */
+cpu->dbgdidr = 0x3516d000;
+cpu->clidr = 0x0a200023;
+cpu->ccsidr[0] = 0x7003e01a; /* 8KB L1 dcache */
+cpu->ccsidr[1] = 0x2007e00a; /* 8KB L1 icache */
+cpu->ccsidr[2] = 0x700fe07a; /* 128KB L2 cache */
+cpu->dcz_blocksize = 4; /* 64 bytes */
+define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+}
+
 #ifdef CONFIG_USER_ONLY
 static void aarch64_any_initfn(Object *obj)
 {
@@ -232,6 +285,7 @@ typedef struct ARMCPUInfo {
 static const ARMCPUInfo aarch64_cpus[] = {
 { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
 { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+{ .name = "generic",.initfn = aarch64_generic_initfn },
 #ifdef CONFIG_USER_ONLY
 { .name = "any", .initfn = aarch64_any_initfn },
 #endif
-- 
2.0.4

