Btw,

Tom, please remind me again: why didn't we make sme_me_mask u64?

Because u64 is the most logical type for a memory encryption mask which
covers 64 bits... In any case, the diff below builds fine here.
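
Just to illustrate the point (this snippet is not part of the patch; bit 47
is only an example position for the C-bit, and u64 is the usual kernel
typedef from <linux/types.h>):

	unsigned long m1 = 1UL << 47;	/* undefined if long is only 32 bits wide */
	u64 m2 = 1ULL << 47;		/* always wide enough to hold the C-bit */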

---
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fcf1f7c..6a77c63540f7 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;
 
 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
                         unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
 
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
-#define sme_me_mask    0UL
+#define sme_me_mask    0ULL
 
 static inline void __init sme_early_encrypt(resource_size_t paddr,
                                            unsigned long size) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd09269757..3fcc8e01683b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..265a9cd21cb4 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #else  /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask    0UL
+#define sme_me_mask    0ULL
 
 #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
@@ -30,18 +30,23 @@ static inline bool sme_active(void)
        return !!sme_me_mask;
 }
 
-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
 {
        return sme_me_mask;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
  * the encryption mask from a value (e.g. when dealing with pagetable
  * entries).
  */
-#define __sme_set(x)           ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x)           ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x)           ((x) | sme_me_mask)
+#define __sme_clr(x)           ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x)           (x)
+#define __sme_clr(x)           (x)
+#endif
 
 #endif /* __ASSEMBLY__ */
 

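One nice side effect of the u64 mask (my reading, not stated above): the old
(unsigned long) casts in __sme_set()/__sme_clr() can go away because OR-ing
against a u64 already promotes the whole expression to 64 bits, so a 64-bit
value passed in keeps its upper bits even on a 32-bit build. Roughly, with
made-up values:

	u64 pte = 0x123456789000ULL;	/* some 64-bit physical address */
	u64 enc = __sme_set(pte);	/* ORs in sme_me_mask */
	u64 dec = __sme_clr(enc);	/* masks it back out */
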
-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.
