Replace occurrences of sme_active() with the more generic prot_guest_has()
using PATTR_HOST_MEM_ENCRYPT, except in arch/x86/mm/mem_encrypt*.c, where
PATTR_SME will be used. If future support is added for other memory
encryption technologies, any uses of PATTR_HOST_MEM_ENCRYPT that are in
fact SME-specific can be updated to PATTR_SME as required.
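
For illustration, the conversion pattern looks like the sketch below. This
is not part of the patch itself; it assumes the prot_guest_has() interface
and PATTR_* attributes introduced earlier in this series, and
need_bounce_buffers() is a hypothetical helper used only as an example:

  #include <linux/types.h>
  #include <linux/protected_guest.h>

  /*
   * Hypothetical helper: decide whether SWIOTLB bounce buffers are
   * needed because the host encrypts memory and some devices cannot
   * DMA to encrypted memory.
   */
  static bool need_bounce_buffers(void)
  {
  	/* Old, SME-only check: if (sme_active()) ... */
  	/* New check, generic across host memory encryption schemes: */
  	return prot_guest_has(PATTR_HOST_MEM_ENCRYPT);
  }

In the SME-specific files (arch/x86/mm/mem_encrypt*.c), PATTR_SME is used
instead, as noted above.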
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Andy Lutomirski
Cc: Peter Zijlstra
Cc: Joerg Roedel
Cc: Will Deacon
Signed-off-by: Tom Lendacky
---
 arch/x86/include/asm/kexec.h         |  2 +-
 arch/x86/include/asm/mem_encrypt.h   |  2 --
 arch/x86/kernel/machine_kexec_64.c   |  3 ++-
 arch/x86/kernel/pci-swiotlb.c        |  9 ++++-----
 arch/x86/kernel/relocate_kernel_64.S |  2 +-
 arch/x86/mm/ioremap.c                |  6 +++---
 arch/x86/mm/mem_encrypt.c            | 10 +++++-----
 arch/x86/mm/mem_encrypt_identity.c   |  3 ++-
 arch/x86/realmode/init.c             |  5 +++--
 drivers/iommu/amd/init.c             |  7 ++++---
 10 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 0a6e34b07017..11b7c06e2828 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -129,7 +129,7 @@ relocate_kernel(unsigned long indirection_page,
unsigned long page_list,
unsigned long start_address,
unsigned int preserve_context,
- unsigned int sme_active);
+ unsigned int host_mem_enc_active);
#endif
#define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index a46d47662772..956338406cec 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,7 +50,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
void __init mem_encrypt_init(void);
void __init sev_es_init_vc_handling(void);
-bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);
bool amd_prot_guest_has(unsigned int attr);
@@ -76,7 +75,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
static inline void sev_es_init_vc_handling(void) { }
-static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
static inline bool sev_es_active(void) { return false; }
static inline bool amd_prot_guest_has(unsigned int attr) { return false; }
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 131f30fdcfbd..8e7b517ad738 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/efi.h>
+#include <linux/protected_guest.h>

#include <asm/init.h>
#include <asm/tlbflush.h>
@@ -358,7 +359,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
- sme_active());
+ prot_guest_has(PATTR_HOST_MEM_ENCRYPT));
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index c2cfa5e7c152..bd9a9cfbc9a2 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -6,7 +6,7 @@
#include <linux/swiotlb.h>
#include <linux/memblock.h>
#include <linux/dma-direct.h>
-#include <linux/mem_encrypt.h>
+#include <linux/protected_guest.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
@@ -45,11 +45,10 @@ int __init pci_swiotlb_detect_4gb(void)
swiotlb = 1;
/*
-* If SME is active then swiotlb will be set to 1 so that bounce
-* buffers are allocated and used for devices that do not support
-* the addressing range required for the encryption mask.
+* Set swiotlb to 1 so that bounce buffers are allocated and used for
+* devices that can't support DMA to encrypted memory.
*/
- if (sme_active())
+ if (prot_guest_has(PATTR_HOST_MEM_ENCRYPT))
swiotlb = 1;
return swiotlb;
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index c53271aebb64..c8fe74a28143 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -47,7 +47,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
* %rsi page_list
* %rdx start address
* %rcx preserve_context
-* %r8 sme_active
+* %r8 host_mem_enc_active
*/
/* Save the CPU context, used for jumping back */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 60ade7dd71bd..f899f02c0241 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -14,7 +14,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
-#include <linux/mem_encrypt.h>
+#include <linux/protected_guest.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
@@ -702,7 +702,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
if (flags &