Re: [RFC PATCH v1 04/28] x86: Secure Encrypted Virtualization (SEV) support

2016-09-22 Thread Borislav Petkov
On Mon, Aug 22, 2016 at 07:24:19PM -0400, Brijesh Singh wrote:
> From: Tom Lendacky <thomas.lenda...@amd.com>

Subject: [RFC PATCH v1 04/28] x86: Secure Encrypted Virtualization (SEV) support

Please start patch commit heading with a verb, i.e.:

"x86: Add AMD Secure Encrypted Virtualization (SEV) support"

-- 
Regards/Gruss,
Boris.

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)
-- 


[RFC PATCH v1 04/28] x86: Secure Encrypted Virtualization (SEV) support

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Provide support for Secure Encrypted Virtualization (SEV). This initial
support defines the SEV active flag so that the kernel can determine
whether it is running with SEV active.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |    3 +++
 arch/x86/kernel/mem_encrypt.S      |    8 ++++++++
 arch/x86/kernel/x8664_ksyms_64.c   |    1 +
 3 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index e395729..9c592d1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -20,6 +20,7 @@
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern unsigned long sme_me_mask;
+extern unsigned int sev_active;
 
 u8 sme_get_me_loss(void);
 
@@ -50,6 +51,8 @@ void swiotlb_set_mem_dec(void *vaddr, unsigned long size);
 
 #define sme_me_mask    0UL
 
+#define sev_active 0
+
 static inline u8 sme_get_me_loss(void)
 {
return 0;
diff --git a/arch/x86/kernel/mem_encrypt.S b/arch/x86/kernel/mem_encrypt.S
index bf9f6a9..6a8cd18 100644
--- a/arch/x86/kernel/mem_encrypt.S
+++ b/arch/x86/kernel/mem_encrypt.S
@@ -96,6 +96,10 @@ ENDPROC(sme_enable)
 
 ENTRY(sme_encrypt_kernel)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+   /* If SEV is active then the kernel is already encrypted */
+   cmpl    $0, sev_active(%rip)
+   jnz     .Lencrypt_exit
+
    /* If SME is not active then no need to encrypt the kernel */
    cmpq    $0, sme_me_mask(%rip)
    jz      .Lencrypt_exit
@@ -334,6 +338,10 @@ sme_me_loss:
.byte   0x00
.align  8
 
+ENTRY(sev_active)
+   .word   0x00000000
+   .align  8
+
 mem_encrypt_enable_option:
.asciz "mem_encrypt=on"
.align  8
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 651c4c8..14bfc0b 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -88,4 +88,5 @@ EXPORT_SYMBOL(___preempt_schedule_notrace);
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 EXPORT_SYMBOL_GPL(sme_me_mask);
 EXPORT_SYMBOL_GPL(sme_get_me_loss);
+EXPORT_SYMBOL_GPL(sev_active);
 #endif
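
For readers skimming the series, here is a minimal C sketch (not part of the patch) of
how the flag is intended to be consumed. It mirrors the checks that sme_encrypt_kernel()
performs in assembly above, and it relies on the header's "#define sev_active 0" fallback
so the same test also builds with CONFIG_AMD_MEM_ENCRYPT disabled. The helper name is
hypothetical.

#include <linux/init.h>
#include <linux/types.h>
#include <asm/mem_encrypt.h>

/* Hypothetical helper, not part of this patch. */
static bool __init kernel_needs_in_place_encryption(void)
{
        /* Under SEV the guest's memory is already encrypted by hardware. */
        if (sev_active)
                return false;

        /* A zero sme_me_mask means SME is not active: nothing to encrypt. */
        if (!sme_me_mask)
                return false;

        return true;
}

The fallback define is what keeps callers free of #ifdef blocks: with CONFIG_AMD_MEM_ENCRYPT
disabled, "if (sev_active)" becomes "if (0)" and the compiler discards the branch entirely.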


