Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-14 Thread Marc Zyngier
On 14/03/18 11:40, James Morse wrote:
> Hi Marc,
> 
> On 01/03/18 15:55, Marc Zyngier wrote:
>> We're now ready to map our vectors in weird and wonderful locations.
>> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
>> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
>> and gets mapped outside of the normal RAM region, next to the
>> idmap.
>>
>> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
>> of the rest of the hypervisor code.
> 
>> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
>> index c58cc5dbe667..c5dab30d3389 100644
>> --- a/Documentation/arm64/memory.txt
>> +++ b/Documentation/arm64/memory.txt
>> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, 
>> the
>>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>>  random) offset from the linear mapping. See the kern_hyp_va macro and
>>  kvm_update_va_mask function for more details. MMIO devices such as
>> -GICv2 gets mapped next to the HYP idmap page.
>> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
>> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>>  
>>  When using KVM with the Virtualization Host Extensions, no additional
>>  mappings are created, since the host kernel runs directly in EL2.
> 
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
>> b/arch/arm64/include/asm/kvm_mmu.h
>> index 3da9e5aea936..433d13d0c271 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
>> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>>  return (cpuid_feature_extract_unsigned_field(reg, 
>> ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>>  }
>>  
>> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
>> + defined(CONFIG_HARDEN_EL2_VECTORS))
>> +/*
>> + * EL2 vectors can be mapped and rerouted in a number of ways,
>> + * depending on the kernel configuration and CPU present:
>> + *
>> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
>> + *   hardening sequence is placed in one of the vector slots, which is
>> + *   executed before jumping to the real vectors.
>> + *
>> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
>> + *   hardening, the slot containing the hardening sequence is mapped
>> + *   next to the idmap page, and executed before jumping to the real
>> + *   vectors.
>> + *
>> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
>> + *   is selected, mapped next to the idmap page, and executed before
>> + *   jumping to the real vectors.
> 
>> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
>> + * VHE, as we don't have hypervisor-specific mappings. If the system
>> + * is VHE and yet selects this capability, it will be ignored.
> 
> Silently? This isn't a problem as the CPUs you enable this for don't have VHE.
> Is it worth a warning? If we did ever need to support it, we can pull the same
> trick the arch code uses, using a fixmap entry for the vectors.

I guess I can put a WARN_ONCE() there, wouldn't hurt. The fixmap approach would 
work, but it is just complicated on heterogeneous systems, as you'd have to 
have one fixmap entry per CPU type. I'm not looking forward to implementing 
this!

> 
> 
>> + */
>>  #include 
>>  
>> +extern void *__kvm_bp_vect_base;
>> +extern int __kvm_harden_el2_vector_slot;
>> +
>>  static inline void *kvm_get_hyp_vector(void)
>>  {
>>  struct bp_hardening_data *data = arm64_get_bp_hardening_data();
>> -void *vect = kvm_ksym_ref(__kvm_hyp_vector);
>> +int slot = -1;
>> +
>> +if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
>> +slot = data->hyp_vectors_slot;
>> +
>> +if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
>> +!has_vhe() && slot == -1)
>> +slot = __kvm_harden_el2_vector_slot;
>>  
>> -if (data->fn) {
>> -vect = __bp_harden_hyp_vecs_start +
>> -   data->hyp_vectors_slot * SZ_2K;
>> +if (slot != -1) {
>> +void *vect;
>>  
>>  if (!has_vhe())
>> -vect = lm_alias(vect);
>> +vect = __kvm_bp_vect_base;
>> +else
>> +vect = __bp_harden_hyp_vecs_start;
>> +vect += slot * SZ_2K;
>> +
>> +return vect;
>>  }
>>  
>> -vect = kern_hyp_va(vect);
>> -return vect;
>> +return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>>  }
>>  
>> +/*  This is only called on a !VHE system */
>>  static inline int kvm_map_vectors(void)
>>  {
>> -return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
>> -   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
>> -   PAGE_HYP_EXEC);
>> -}
>> +phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
>> +unsigned long size = __bp_harden_hyp_vecs_e

Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-14 Thread James Morse
Hi Marc,

On 01/03/18 15:55, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.

> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
> index c58cc5dbe667..c5dab30d3389 100644
> --- a/Documentation/arm64/memory.txt
> +++ b/Documentation/arm64/memory.txt
> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, 
> the
>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>  random) offset from the linear mapping. See the kern_hyp_va macro and
>  kvm_update_va_mask function for more details. MMIO devices such as
> -GICv2 gets mapped next to the HYP idmap page.
> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>  
>  When using KVM with the Virtualization Host Extensions, no additional
>  mappings are created, since the host kernel runs directly in EL2.

> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
> b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>   return (cpuid_feature_extract_unsigned_field(reg, 
> ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>  
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||  \
> + defined(CONFIG_HARDEN_EL2_VECTORS))
> +/*
> + * EL2 vectors can be mapped and rerouted in a number of ways,
> + * depending on the kernel configuration and CPU present:
> + *
> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
> + *   hardening sequence is placed in one of the vector slots, which is
> + *   executed before jumping to the real vectors.
> + *
> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
> + *   hardening, the slot containing the hardening sequence is mapped
> + *   next to the idmap page, and executed before jumping to the real
> + *   vectors.
> + *
> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
> + *   is selected, mapped next to the idmap page, and executed before
> + *   jumping to the real vectors.

> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
> + * VHE, as we don't have hypervisor-specific mappings. If the system
> + * is VHE and yet selects this capability, it will be ignored.

Silently? This isn't a problem as the CPUs you enable this for don't have VHE.
Is it worth a warning? If we did ever need to support it, we can pull the same
trick the arch code uses, using a fixmap entry for the vectors.


> + */
>  #include 
>  
> +extern void *__kvm_bp_vect_base;
> +extern int __kvm_harden_el2_vector_slot;
> +
>  static inline void *kvm_get_hyp_vector(void)
>  {
>   struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> - void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> + int slot = -1;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
> + slot = data->hyp_vectors_slot;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
> + !has_vhe() && slot == -1)
> + slot = __kvm_harden_el2_vector_slot;
>  
> - if (data->fn) {
> - vect = __bp_harden_hyp_vecs_start +
> -data->hyp_vectors_slot * SZ_2K;
> + if (slot != -1) {
> + void *vect;
>  
>   if (!has_vhe())
> - vect = lm_alias(vect);
> + vect = __kvm_bp_vect_base;
> + else
> + vect = __bp_harden_hyp_vecs_start;
> + vect += slot * SZ_2K;
> +
> + return vect;
>   }
>  
> - vect = kern_hyp_va(vect);
> - return vect;
> + return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>  }
>  
> +/*  This is only called on a !VHE system */
>  static inline int kvm_map_vectors(void)
>  {
> - return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> -kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> -PAGE_HYP_EXEC);
> -}
> + phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
> + unsigned long size = __bp_harden_hyp_vecs_end - 
> __bp_harden_hyp_vecs_start;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> + int ret;
> +
> + ret = 
> create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +   
> kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +   PAGE_HYP_EXEC);

We don't ha

Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-13 Thread Andrew Jones
On Tue, Mar 13, 2018 at 10:30:12AM +, Marc Zyngier wrote:
> >> +
> >> +if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> > 
> > Moving the create_hyp_mappings() under this cap check looks like a fix
> > that could be posted separately?
> 
> We could. Not sure that's a big deal though.

Nah, it isn't worth the fuss.

> >> +
> >> +  if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> >> +  __kvm_harden_el2_vector_slot = 
> >> atomic_inc_return(&arm64_el2_vector_last_slot);
> > 
> > If I understood the logic in the above function correctly, then we won't
> > be using this slot when we have the ARM64_HARDEN_BRANCH_PREDICTOR cap.
> > Should we even bother allocating it when we don't intend to use it?
> 
> You could be on a system with a mix of affected and non-affected CPUs,
> and the capability only tells you that at least one of the CPUs is
> affected. For the non affected CPUs, we won't have a slot (since there
> is no workaround to provide), unless we actually allocated one.
>

Ah yes, the heterogeneous considerations that I always forget to consider.

Thanks,
drew
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-13 Thread Marc Zyngier
Hi Drew,

On 08/03/18 17:54, Andrew Jones wrote:
> On Thu, Mar 01, 2018 at 03:55:37PM +, Marc Zyngier wrote:
>> We're now ready to map our vectors in weird and wonderful locations.
>> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
>> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
>> and gets mapped outside of the normal RAM region, next to the
>> idmap.
>>
>> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
>> of the rest of the hypervisor code.
>>
>> Signed-off-by: Marc Zyngier 
>> ---
>>  Documentation/arm64/memory.txt   |  3 +-
>>  arch/arm64/Kconfig   | 16 
>>  arch/arm64/include/asm/kvm_mmu.h | 81 
>> ++--
>>  arch/arm64/include/asm/mmu.h |  5 ++-
>>  arch/arm64/kernel/Makefile   |  4 +-
>>  arch/arm64/kvm/va_layout.c   |  3 ++
>>  6 files changed, 96 insertions(+), 16 deletions(-)
>>
>> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
>> index c58cc5dbe667..c5dab30d3389 100644
>> --- a/Documentation/arm64/memory.txt
>> +++ b/Documentation/arm64/memory.txt
>> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, 
>> the
>>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>>  random) offset from the linear mapping. See the kern_hyp_va macro and
>>  kvm_update_va_mask function for more details. MMIO devices such as
>> -GICv2 gets mapped next to the HYP idmap page.
>> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
>> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>>  
>>  When using KVM with the Virtualization Host Extensions, no additional
>>  mappings are created, since the host kernel runs directly in EL2.
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index 7381eeb7ef8e..e6be4393aaad 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
>>  
>>If unsure, say Y.
>>  
>> +config HARDEN_EL2_VECTORS
>> +bool "Harden EL2 vector mapping against system register leak" if EXPERT
>> +default y
>> +help
>> +  Speculation attacks against some high-performance processors can
>> +  be used to leak privileged information such as the vector base
>> +  register, resulting in a potential defeat of the EL2 layout
>> +  randomization.
>> +
>> +  This config option will map the vectors to a fixed location,
>> +  independent of the EL2 code mapping, so that revealing VBAR_EL2
>> +  to an attacker does not give away any extra information. This
>> +  only gets enabled on affected CPUs.
>> +
>> +  If unsure, say Y.
>> +
>>  menuconfig ARMV8_DEPRECATED
>>  bool "Emulate deprecated/obsolete ARMv8 instructions"
>>  depends on COMPAT
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
>> b/arch/arm64/include/asm/kvm_mmu.h
>> index 3da9e5aea936..433d13d0c271 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
>> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>>  return (cpuid_feature_extract_unsigned_field(reg, 
>> ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>>  }
>>  
>> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
>> + defined(CONFIG_HARDEN_EL2_VECTORS))
>> +/*
>> + * EL2 vectors can be mapped and rerouted in a number of ways,
>> + * depending on the kernel configuration and CPU present:
>> + *
>> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
>> + *   hardening sequence is placed in one of the vector slots, which is
>> + *   executed before jumping to the real vectors.
>> + *
>> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
>> + *   hardening, the slot containing the hardening sequence is mapped
>> + *   next to the idmap page, and executed before jumping to the real
>> + *   vectors.
>> + *
>> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
>> + *   is selected, mapped next to the idmap page, and executed before
>> + *   jumping to the real vectors.
>> + *
>> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
>> + * VHE, as we don't have hypervisor-specific mappings. If the system
>> + * is VHE and yet selects this capability, it will be ignored.
>> + */
>>  #include 
>>  
>> +extern void *__kvm_bp_vect_base;
>> +extern int __kvm_harden_el2_vector_slot;
>> +
>>  static inline void *kvm_get_hyp_vector(void)
>>  {
>>  struct bp_hardening_data *data = arm64_get_bp_hardening_data();
>> -void *vect = kvm_ksym_ref(__kvm_hyp_vector);
>> +int slot = -1;
>> +
>> +if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
>> +slot = data->hyp_vectors_slot;
>> +
>> +if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
>> +!has_vhe() && slot == -1)
>> +slot = __kvm_harden_el2_vector_slot;
>>  
>> -if (data->fn) {
>> - 

Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-12 Thread Marc Zyngier
On 09/03/18 18:59, James Morse wrote:
> Hi Marc,
> 
> On 01/03/18 15:55, Marc Zyngier wrote:
>> We're now ready to map our vectors in weird and wonderful locations.
>> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
>> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
>> and gets mapped outside of the normal RAM region, next to the
>> idmap.
>>
>> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
>> of the rest of the hypervisor code.
> 
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
>> b/arch/arm64/include/asm/kvm_mmu.h
>> index 3da9e5aea936..433d13d0c271 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
> 
> [..]
> 
>>  
>> +/*  This is only called on a !VHE system */
>>  static inline int kvm_map_vectors(void)
>>  {
>> -return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
>> -   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
>> -   PAGE_HYP_EXEC);
>> -}
>> +phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
> 
> __pa_symbol()?
> 
> A gift from CONFIG_DEBUG_VIRTUAL:
> 
> [3.479878] kvm [1]: 8-bit VMID
> [3.500761] [ cut here ]
> [3.505608] virt_to_phys used for non-linear address: 6fa7ae39
> (__bp_harden_hyp_vecs_start+0x0/0x2000)
> [3.515907] WARNING: CPU: 3 PID: 1 at ../arch/arm64/mm/physaddr.c:15
> __virt_to_phys+0x48/0x68
> [3.524614] Modules linked in:
> [3.527782] CPU: 3 PID: 1 Comm: swapper/0 Not tainted
> 4.16.0-rc4-00024-gf6f4460e41ba-dirty #9396
> [3.536751] Hardware name: ARM Juno development board (r1) (DT)
> [3.542806] pstate: 8045 (Nzcv daif +PAN -UAO)
> [3.547716] pc : __virt_to_phys+0x48/0x68
> [3.551832] lr : __virt_to_phys+0x48/0x68
> 
> [3.641447] Call trace:
> [3.643975]  __virt_to_phys+0x48/0x68
> [3.647739]  kvm_arch_init+0x2fc/0x734
> [3.651589]  kvm_init+0x28/0x2b0
> [3.654910]  arm_init+0x1c/0x24
> [3.658143]  do_one_initcall+0x38/0x11c
> [3.662083]  kernel_init_freeable+0x1e0/0x27c
> [3.666552]  kernel_init+0x10/0xfc
> [3.670049]  ret_from_fork+0x10/0x18
> [3.673731] ---[ end trace d4ef061e4bf05fc6 ]---
> [3.678870] kvm [1]: vgic-v2@2c04f000
> [3.683424] kvm [1]: vgic interrupt IRQ1
> [3.687675] kvm [1]: virtual timer IRQ5
> [3.692375] kvm [1]: Hyp mode initialized successfully
> [3.718640] Initialise system trusted keyrings
Nice catch. Fixed locally.

Thanks,

M.
-- 
Jazz is not dead. It just smells funny...
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-09 Thread James Morse
Hi Marc,

On 01/03/18 15:55, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.

> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
> b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h

[..]

>  
> +/*  This is only called on a !VHE system */
>  static inline int kvm_map_vectors(void)
>  {
> - return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> -kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> -PAGE_HYP_EXEC);
> -}
> + phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);

__pa_symbol()?

A gift from CONFIG_DEBUG_VIRTUAL:

[3.479878] kvm [1]: 8-bit VMID
[3.500761] [ cut here ]
[3.505608] virt_to_phys used for non-linear address: 6fa7ae39
(__bp_harden_hyp_vecs_start+0x0/0x2000)
[3.515907] WARNING: CPU: 3 PID: 1 at ../arch/arm64/mm/physaddr.c:15
__virt_to_phys+0x48/0x68
[3.524614] Modules linked in:
[3.527782] CPU: 3 PID: 1 Comm: swapper/0 Not tainted
4.16.0-rc4-00024-gf6f4460e41ba-dirty #9396
[3.536751] Hardware name: ARM Juno development board (r1) (DT)
[3.542806] pstate: 8045 (Nzcv daif +PAN -UAO)
[3.547716] pc : __virt_to_phys+0x48/0x68
[3.551832] lr : __virt_to_phys+0x48/0x68

[3.641447] Call trace:
[3.643975]  __virt_to_phys+0x48/0x68
[3.647739]  kvm_arch_init+0x2fc/0x734
[3.651589]  kvm_init+0x28/0x2b0
[3.654910]  arm_init+0x1c/0x24
[3.658143]  do_one_initcall+0x38/0x11c
[3.662083]  kernel_init_freeable+0x1e0/0x27c
[3.666552]  kernel_init+0x10/0xfc
[3.670049]  ret_from_fork+0x10/0x18
[3.673731] ---[ end trace d4ef061e4bf05fc6 ]---
[3.678870] kvm [1]: vgic-v2@2c04f000
[3.683424] kvm [1]: vgic interrupt IRQ1
[3.687675] kvm [1]: virtual timer IRQ5
[3.692375] kvm [1]: Hyp mode initialized successfully
[3.718640] Initialise system trusted keyrings



> + unsigned long size = __bp_harden_hyp_vecs_end - 
> __bp_harden_hyp_vecs_start;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> + int ret;
> +
> + ret = 
> create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +   
> kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +   PAGE_HYP_EXEC);
> +
> + if (ret)
> + return ret;
> +
> + __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
> + __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
> + }
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> + __kvm_harden_el2_vector_slot = 
> atomic_inc_return(&arm64_el2_vector_last_slot);
> + BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
> + return create_hyp_exec_mappings(vect_pa, size,
> + &__kvm_bp_vect_base);
> + }
>  
> + return 0;
> +}


Thanks,

James

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v5 22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

2018-03-08 Thread Andrew Jones
On Thu, Mar 01, 2018 at 03:55:37PM +, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't been already done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.
> 
> Signed-off-by: Marc Zyngier 
> ---
>  Documentation/arm64/memory.txt   |  3 +-
>  arch/arm64/Kconfig   | 16 
>  arch/arm64/include/asm/kvm_mmu.h | 81 
> ++--
>  arch/arm64/include/asm/mmu.h |  5 ++-
>  arch/arm64/kernel/Makefile   |  4 +-
>  arch/arm64/kvm/va_layout.c   |  3 ++
>  6 files changed, 96 insertions(+), 16 deletions(-)
> 
> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
> index c58cc5dbe667..c5dab30d3389 100644
> --- a/Documentation/arm64/memory.txt
> +++ b/Documentation/arm64/memory.txt
> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, 
> the
>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>  random) offset from the linear mapping. See the kern_hyp_va macro and
>  kvm_update_va_mask function for more details. MMIO devices such as
> -GICv2 gets mapped next to the HYP idmap page.
> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>  
>  When using KVM with the Virtualization Host Extensions, no additional
>  mappings are created, since the host kernel runs directly in EL2.
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 7381eeb7ef8e..e6be4393aaad 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
>  
> If unsure, say Y.
>  
> +config HARDEN_EL2_VECTORS
> + bool "Harden EL2 vector mapping against system register leak" if EXPERT
> + default y
> + help
> +   Speculation attacks against some high-performance processors can
> +   be used to leak privileged information such as the vector base
> +   register, resulting in a potential defeat of the EL2 layout
> +   randomization.
> +
> +   This config option will map the vectors to a fixed location,
> +   independent of the EL2 code mapping, so that revealing VBAR_EL2
> +   to an attacker does not give away any extra information. This
> +   only gets enabled on affected CPUs.
> +
> +   If unsure, say Y.
> +
>  menuconfig ARMV8_DEPRECATED
>   bool "Emulate deprecated/obsolete ARMv8 instructions"
>   depends on COMPAT
> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
> b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>   return (cpuid_feature_extract_unsigned_field(reg, 
> ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>  
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||  \
> + defined(CONFIG_HARDEN_EL2_VECTORS))
> +/*
> + * EL2 vectors can be mapped and rerouted in a number of ways,
> + * depending on the kernel configuration and CPU present:
> + *
> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
> + *   hardening sequence is placed in one of the vector slots, which is
> + *   executed before jumping to the real vectors.
> + *
> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
> + *   hardening, the slot containing the hardening sequence is mapped
> + *   next to the idmap page, and executed before jumping to the real
> + *   vectors.
> + *
> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
> + *   is selected, mapped next to the idmap page, and executed before
> + *   jumping to the real vectors.
> + *
> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
> + * VHE, as we don't have hypervisor-specific mappings. If the system
> + * is VHE and yet selects this capability, it will be ignored.
> + */
>  #include 
>  
> +extern void *__kvm_bp_vect_base;
> +extern int __kvm_harden_el2_vector_slot;
> +
>  static inline void *kvm_get_hyp_vector(void)
>  {
>   struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> - void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> + int slot = -1;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
> + slot = data->hyp_vectors_slot;
> +
> + if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
> + !has_vhe() && slot == -1)
> + slot = __kvm_harden_el2_vector_slot;
>  
> - if (data->fn) {
> - vect = __bp_harden_hyp_vecs_start +
> -data->hyp_vectors_slot * SZ_2K;
> + if (slot != -1) {
> +