Re: [PATCH v3 4/6] x86,s390/mm: Move sme_active() and sme_me_mask to x86-specific header
On Thu, Jul 18, 2019 at 05:42:18PM +0000, Lendacky, Thomas wrote: > You may want to try and build the out-of-tree nvidia driver just to be > sure you can remove the EXPORT_SYMBOL(). But I believe that was related > to the DMA mask check, which now removed, may no longer be a problem. Out-of-tree drivers simply don't matter for kernel development decisions.
Re: [PATCH v3 4/6] x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header
On 7/17/19 10:28 PM, Thiago Jung Bauermann wrote: > Now that generic code doesn't reference them, move sme_active() and > sme_me_mask to x86's . > > Also remove the export for sme_active() since it's only used in files that > won't be built as modules. sme_me_mask on the other hand is used in > arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()) which can be built as a > module so its export needs to stay. You may want to try and build the out-of-tree nvidia driver just to be sure you can remove the EXPORT_SYMBOL(). But I believe that was related to the DMA mask check, which now removed, may no longer be a problem. > > Signed-off-by: Thiago Jung Bauermann Reviewed-by: Tom Lendacky > --- > arch/s390/include/asm/mem_encrypt.h | 4 +--- > arch/x86/include/asm/mem_encrypt.h | 10 ++ > arch/x86/mm/mem_encrypt.c | 1 - > include/linux/mem_encrypt.h | 14 +- > 4 files changed, 12 insertions(+), 17 deletions(-) > > diff --git a/arch/s390/include/asm/mem_encrypt.h > b/arch/s390/include/asm/mem_encrypt.h > index 3eb018508190..ff813a56bc30 100644 > --- a/arch/s390/include/asm/mem_encrypt.h > +++ b/arch/s390/include/asm/mem_encrypt.h > @@ -4,9 +4,7 @@ > > #ifndef __ASSEMBLY__ > > -#define sme_me_mask 0ULL > - > -static inline bool sme_active(void) { return false; } > +static inline bool mem_encrypt_active(void) { return false; } > extern bool sev_active(void); > > int set_memory_encrypted(unsigned long addr, int numpages); > diff --git a/arch/x86/include/asm/mem_encrypt.h > b/arch/x86/include/asm/mem_encrypt.h > index 0c196c47d621..848ce43b9040 100644 > --- a/arch/x86/include/asm/mem_encrypt.h > +++ b/arch/x86/include/asm/mem_encrypt.h > @@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned > long size) { return 0; > > extern char __start_bss_decrypted[], __end_bss_decrypted[], > __start_bss_decrypted_unused[]; > > +static inline bool mem_encrypt_active(void) > +{ > + return sme_me_mask; > +} > + > +static inline u64 sme_get_me_mask(void) > +{ > + return 
sme_me_mask; > +} > + > #endif /* __ASSEMBLY__ */ > > #endif /* __X86_MEM_ENCRYPT_H__ */ > diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c > index c805f0a5c16e..7139f2f43955 100644 > --- a/arch/x86/mm/mem_encrypt.c > +++ b/arch/x86/mm/mem_encrypt.c > @@ -344,7 +344,6 @@ bool sme_active(void) > { > return sme_me_mask && !sev_enabled; > } > -EXPORT_SYMBOL(sme_active); > > bool sev_active(void) > { > diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h > index 470bd53a89df..0c5b0ff9eb29 100644 > --- a/include/linux/mem_encrypt.h > +++ b/include/linux/mem_encrypt.h > @@ -18,23 +18,11 @@ > > #else/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ > > -#define sme_me_mask 0ULL > - > -static inline bool sme_active(void) { return false; } > +static inline bool mem_encrypt_active(void) { return false; } > static inline bool sev_active(void) { return false; } > > #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ > > -static inline bool mem_encrypt_active(void) > -{ > - return sme_me_mask; > -} > - > -static inline u64 sme_get_me_mask(void) > -{ > - return sme_me_mask; > -} > - > #ifdef CONFIG_AMD_MEM_ENCRYPT > /* > * The __sme_set() and __sme_clr() macros are useful for adding or removing >
Re: [PATCH v3 4/6] x86,s390/mm: Move sme_active() and sme_me_mask to x86-specific header
On Thu, Jul 18, 2019 at 12:28:56AM -0300, Thiago Jung Bauermann wrote: > Now that generic code doesn't reference them, move sme_active() and > sme_me_mask to x86's <asm/mem_encrypt.h>. > > Also remove the export for sme_active() since it's only used in files that > won't be built as modules. sme_me_mask on the other hand is used in > arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()) which can be built as a > module so its export needs to stay. > > Signed-off-by: Thiago Jung Bauermann Looks good, Reviewed-by: Christoph Hellwig
[PATCH v3 4/6] x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header
Now that generic code doesn't reference them, move sme_active() and sme_me_mask to x86's . Also remove the export for sme_active() since it's only used in files that won't be built as modules. sme_me_mask on the other hand is used in arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()) which can be built as a module so its export needs to stay. Signed-off-by: Thiago Jung Bauermann --- arch/s390/include/asm/mem_encrypt.h | 4 +--- arch/x86/include/asm/mem_encrypt.h | 10 ++ arch/x86/mm/mem_encrypt.c | 1 - include/linux/mem_encrypt.h | 14 +- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h index 3eb018508190..ff813a56bc30 100644 --- a/arch/s390/include/asm/mem_encrypt.h +++ b/arch/s390/include/asm/mem_encrypt.h @@ -4,9 +4,7 @@ #ifndef __ASSEMBLY__ -#define sme_me_mask0ULL - -static inline bool sme_active(void) { return false; } +static inline bool mem_encrypt_active(void) { return false; } extern bool sev_active(void); int set_memory_encrypted(unsigned long addr, int numpages); diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 0c196c47d621..848ce43b9040 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; +static inline bool mem_encrypt_active(void) +{ + return sme_me_mask; +} + +static inline u64 sme_get_me_mask(void) +{ + return sme_me_mask; +} + #endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index c805f0a5c16e..7139f2f43955 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -344,7 +344,6 @@ bool sme_active(void) { return sme_me_mask && !sev_enabled; } -EXPORT_SYMBOL(sme_active); bool sev_active(void) { diff --git 
a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 470bd53a89df..0c5b0ff9eb29 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -18,23 +18,11 @@ #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ -#define sme_me_mask0ULL - -static inline bool sme_active(void) { return false; } +static inline bool mem_encrypt_active(void) { return false; } static inline bool sev_active(void) { return false; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ -static inline bool mem_encrypt_active(void) -{ - return sme_me_mask; -} - -static inline u64 sme_get_me_mask(void) -{ - return sme_me_mask; -} - #ifdef CONFIG_AMD_MEM_ENCRYPT /* * The __sme_set() and __sme_clr() macros are useful for adding or removing