[PATCH v4 4/5] x86/head/64: Check SEV encryption before switching to kernel page-table

2020-10-28 Thread Joerg Roedel
From: Joerg Roedel 

When SEV is enabled the kernel requests the C-Bit position again from
the hypervisor to build its own page-table. Since the hypervisor is an
untrusted source the C-bit position needs to be verified before the
kernel page-table is used.

Call the sev_verify_cbit() function before writing the CR3.

Signed-off-by: Joerg Roedel 
---
 arch/x86/kernel/head_64.S | 16 
 arch/x86/mm/mem_encrypt.c |  1 +
 2 files changed, 17 insertions(+)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7eb2a1c87969..3c417734790f 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -161,6 +161,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 
/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
+
+   /*
+* For SEV guests: Verify that the C-bit is correct. A malicious
+* hypervisor could lie about the C-bit position to perform a ROP
+* attack on the guest by writing to the unencrypted stack and wait for
+* the next RET instruction.
+* %rsi carries pointer to realmode data and is callee-clobbered. Save
+* and restore it.
+*/
+   pushq   %rsi
+	movq	%rax, %rdi
+	call	sev_verify_cbit
+   popq%rsi
+
+   /* Switch to new page-table */
	movq	%rax, %cr3
 
/* Ensure I am executing from virtual addresses */
@@ -279,6 +294,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
+#include "sev_verify_cbit.S"
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index efbb3de472df..bc0833713be9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -39,6 +39,7 @@
  */
 u64 sme_me_mask __section(".data") = 0;
 u64 sev_status __section(".data") = 0;
+u64 sev_check_data __section(".data") = 0;
 EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
-- 
2.28.0



Re: [PATCH v4 4/5] x86/head/64: Check SEV encryption before switching to kernel page-table

2020-10-28 Thread Tom Lendacky
On 10/28/20 11:46 AM, Joerg Roedel wrote:
> From: Joerg Roedel 
> 
> When SEV is enabled the kernel requests the C-Bit position again from
> the hypervisor to built its own page-table. Since the hypervisor is an

s/built/build/

> untrusted source the C-bit position needs to be verified before the
> kernel page-table is used.
> 
> Call the sev_verify_cbit() function before writing the CR3.
> 
> Signed-off-by: Joerg Roedel 

Reviewed-by: Tom Lendacky 

> ---
>  arch/x86/kernel/head_64.S | 16 
>  arch/x86/mm/mem_encrypt.c |  1 +
>  2 files changed, 17 insertions(+)
> 
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 7eb2a1c87969..3c417734790f 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -161,6 +161,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
>  
>   /* Setup early boot stage 4-/5-level pagetables. */
>	addq	phys_base(%rip), %rax
> +
> + /*
> +  * For SEV guests: Verify that the C-bit is correct. A malicious
> +  * hypervisor could lie about the C-bit position to perform a ROP
> +  * attack on the guest by writing to the unencrypted stack and wait for
> +  * the next RET instruction.
> +  * %rsi carries pointer to realmode data and is callee-clobbered. Save
> +  * and restore it.
> +  */
> + pushq   %rsi
> +	movq	%rax, %rdi
> +	call	sev_verify_cbit
> + popq%rsi
> +
> + /* Switch to new page-table */
>	movq	%rax, %cr3
>  
>   /* Ensure I am executing from virtual addresses */
> @@ -279,6 +294,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
>  SYM_CODE_END(secondary_startup_64)
>  
>  #include "verify_cpu.S"
> +#include "sev_verify_cbit.S"
>  
>  #ifdef CONFIG_HOTPLUG_CPU
>  /*
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index efbb3de472df..bc0833713be9 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -39,6 +39,7 @@
>   */
>  u64 sme_me_mask __section(".data") = 0;
>  u64 sev_status __section(".data") = 0;
> +u64 sev_check_data __section(".data") = 0;
>  EXPORT_SYMBOL(sme_me_mask);
>  DEFINE_STATIC_KEY_FALSE(sev_enable_key);
>  EXPORT_SYMBOL_GPL(sev_enable_key);
>