Re: [PATCH v2 06/12] x86/sev: Replace occurrences of sev_active() with prot_guest_has()
On Tue, Aug 17, 2021 at 10:26:18AM -0500, Tom Lendacky wrote: > >>/* > >> - * If SME is active we need to be sure that kexec pages are > >> - * not encrypted because when we boot to the new kernel the > >> + * If host memory encryption is active we need to be sure that kexec > >> + * pages are not encrypted because when we boot to the new kernel the > >> * pages won't be accessed encrypted (initially). > >> */ > > > > That hunk belongs logically into the previous patch which removes > > sme_active(). > > I was trying to keep the sev_active() changes separate... so even though > it's an SME thing, I kept it here. But I can move it to the previous > patch, it just might look strange. Oh I meant only the comment because it is an SME-related change. But not too important so whatever. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH v2 06/12] x86/sev: Replace occurrences of sev_active() with prot_guest_has()
On 8/17/21 5:02 AM, Borislav Petkov wrote: > On Fri, Aug 13, 2021 at 11:59:25AM -0500, Tom Lendacky wrote: >> diff --git a/arch/x86/kernel/machine_kexec_64.c >> b/arch/x86/kernel/machine_kexec_64.c >> index 8e7b517ad738..66ff788b79c9 100644 >> --- a/arch/x86/kernel/machine_kexec_64.c >> +++ b/arch/x86/kernel/machine_kexec_64.c >> @@ -167,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, >> pgd_t *pgd) >> } >> pte = pte_offset_kernel(pmd, vaddr); >> >> -if (sev_active()) >> +if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) >> prot = PAGE_KERNEL_EXEC; >> >> set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); >> @@ -207,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned >> long start_pgtable) >> level4p = (pgd_t *)__va(start_pgtable); >> clear_page(level4p); >> >> -if (sev_active()) { >> +if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) { >> info.page_flag |= _PAGE_ENC; >> info.kernpg_flag |= _PAGE_ENC; >> } >> @@ -570,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void) >> */ >> int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) >> { >> -if (sev_active()) >> +if (!prot_guest_has(PATTR_HOST_MEM_ENCRYPT)) >> return 0; >> >> /* >> - * If SME is active we need to be sure that kexec pages are >> - * not encrypted because when we boot to the new kernel the >> + * If host memory encryption is active we need to be sure that kexec >> + * pages are not encrypted because when we boot to the new kernel the >> * pages won't be accessed encrypted (initially). >> */ > > That hunk belongs logically into the previous patch which removes > sme_active(). I was trying to keep the sev_active() changes separate... so even though it's an SME thing, I kept it here. But I can move it to the previous patch, it just might look strange. 
> >> return set_memory_decrypted((unsigned long)vaddr, pages); >> @@ -583,12 +583,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned >> int pages, gfp_t gfp) >> >> void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) >> { >> -if (sev_active()) >> +if (!prot_guest_has(PATTR_HOST_MEM_ENCRYPT)) >> return; >> >> /* >> - * If SME is active we need to reset the pages back to being >> - * an encrypted mapping before freeing them. >> + * If host memory encryption is active we need to reset the pages back >> + * to being an encrypted mapping before freeing them. >> */ >> set_memory_encrypted((unsigned long)vaddr, pages); >> } >> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c >> index e8ccab50ebf6..b69f5ac622d5 100644 >> --- a/arch/x86/kvm/svm/svm.c >> +++ b/arch/x86/kvm/svm/svm.c >> @@ -25,6 +25,7 @@ >> #include >> #include >> #include >> +#include >> >> #include >> #include >> @@ -457,7 +458,7 @@ static int has_svm(void) >> return 0; >> } >> >> -if (sev_active()) { >> +if (prot_guest_has(PATTR_SEV)) { >> pr_info("KVM is unsupported when running as an SEV guest\n"); >> return 0; > > Same question as for PATTR_SME. PATTR_GUEST_MEM_ENCRYPT should be enough. Yup, I'll change them all. > >> @@ -373,7 +373,7 @@ int __init early_set_memory_encrypted(unsigned long >> vaddr, unsigned long size) >> * up under SME the trampoline area cannot be encrypted, whereas under SEV >> * the trampoline area must be encrypted. >> */ >> -bool sev_active(void) >> +static bool sev_active(void) >> { >> return sev_status & MSR_AMD64_SEV_ENABLED; >> } >> @@ -382,7 +382,6 @@ static bool sme_active(void) >> { >> return sme_me_mask && !sev_active(); >> } >> -EXPORT_SYMBOL_GPL(sev_active); > > Just get rid of it altogether. Ok. Thanks, Tom > > Thx. > ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH v2 06/12] x86/sev: Replace occurrences of sev_active() with prot_guest_has()
On Fri, Aug 13, 2021 at 11:59:25AM -0500, Tom Lendacky wrote: > diff --git a/arch/x86/kernel/machine_kexec_64.c > b/arch/x86/kernel/machine_kexec_64.c > index 8e7b517ad738..66ff788b79c9 100644 > --- a/arch/x86/kernel/machine_kexec_64.c > +++ b/arch/x86/kernel/machine_kexec_64.c > @@ -167,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, > pgd_t *pgd) > } > pte = pte_offset_kernel(pmd, vaddr); > > - if (sev_active()) > + if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) > prot = PAGE_KERNEL_EXEC; > > set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); > @@ -207,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned > long start_pgtable) > level4p = (pgd_t *)__va(start_pgtable); > clear_page(level4p); > > - if (sev_active()) { > + if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) { > info.page_flag |= _PAGE_ENC; > info.kernpg_flag |= _PAGE_ENC; > } > @@ -570,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void) > */ > int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) > { > - if (sev_active()) > + if (!prot_guest_has(PATTR_HOST_MEM_ENCRYPT)) > return 0; > > /* > - * If SME is active we need to be sure that kexec pages are > - * not encrypted because when we boot to the new kernel the > + * If host memory encryption is active we need to be sure that kexec > + * pages are not encrypted because when we boot to the new kernel the >* pages won't be accessed encrypted (initially). >*/ That hunk belongs logically into the previous patch which removes sme_active(). > return set_memory_decrypted((unsigned long)vaddr, pages); > @@ -583,12 +583,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned > int pages, gfp_t gfp) > > void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) > { > - if (sev_active()) > + if (!prot_guest_has(PATTR_HOST_MEM_ENCRYPT)) > return; > > /* > - * If SME is active we need to reset the pages back to being > - * an encrypted mapping before freeing them. 
> + * If host memory encryption is active we need to reset the pages back > + * to being an encrypted mapping before freeing them. >*/ > set_memory_encrypted((unsigned long)vaddr, pages); > } > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index e8ccab50ebf6..b69f5ac622d5 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -25,6 +25,7 @@ > #include > #include > #include > +#include > > #include > #include > @@ -457,7 +458,7 @@ static int has_svm(void) > return 0; > } > > - if (sev_active()) { > + if (prot_guest_has(PATTR_SEV)) { > pr_info("KVM is unsupported when running as an SEV guest\n"); > return 0; Same question as for PATTR_SME. PATTR_GUEST_MEM_ENCRYPT should be enough. > @@ -373,7 +373,7 @@ int __init early_set_memory_encrypted(unsigned long > vaddr, unsigned long size) > * up under SME the trampoline area cannot be encrypted, whereas under SEV > * the trampoline area must be encrypted. > */ > -bool sev_active(void) > +static bool sev_active(void) > { > return sev_status & MSR_AMD64_SEV_ENABLED; > } > @@ -382,7 +382,6 @@ static bool sme_active(void) > { > return sme_me_mask && !sev_active(); > } > -EXPORT_SYMBOL_GPL(sev_active); Just get rid of it altogether. Thx. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
[PATCH v2 06/12] x86/sev: Replace occurrences of sev_active() with prot_guest_has()
Replace occurrences of sev_active() with the more generic prot_guest_has() using PATTR_GUEST_MEM_ENCRYPT, except for in arch/x86/mm/mem_encrypt*.c where PATTR_SEV will be used. If future support is added for other memory encryption technologies, the use of PATTR_GUEST_MEM_ENCRYPT can be updated, as required, to use PATTR_SEV. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Ard Biesheuvel Reviewed-by: Joerg Roedel Signed-off-by: Tom Lendacky --- arch/x86/include/asm/mem_encrypt.h | 2 -- arch/x86/kernel/crash_dump_64.c| 4 +++- arch/x86/kernel/kvm.c | 3 ++- arch/x86/kernel/kvmclock.c | 4 ++-- arch/x86/kernel/machine_kexec_64.c | 16 arch/x86/kvm/svm/svm.c | 3 ++- arch/x86/mm/ioremap.c | 6 +++--- arch/x86/mm/mem_encrypt.c | 15 +++ arch/x86/platform/efi/efi_64.c | 9 + 9 files changed, 32 insertions(+), 30 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 956338406cec..7e25de37c148 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -50,7 +50,6 @@ void __init mem_encrypt_free_decrypted_mem(void); void __init mem_encrypt_init(void); void __init sev_es_init_vc_handling(void); -bool sev_active(void); bool sev_es_active(void); bool amd_prot_guest_has(unsigned int attr); @@ -75,7 +74,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } static inline void sev_es_init_vc_handling(void) { } -static inline bool sev_active(void) { return false; } static inline bool sev_es_active(void) { return false; } static inline bool amd_prot_guest_has(unsigned int attr) { return false; } diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 045e82e8945b..0cfe35f03e67 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -10,6 +10,7 @@ #include #include #include +#include static ssize_t 
__copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf, @@ -73,5 +74,6 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { - return read_from_oldmem(buf, count, ppos, 0, sev_active()); + return read_from_oldmem(buf, count, ppos, 0, + prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index a26643dc6bd6..9d08ad2f3faa 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -418,7 +419,7 @@ static void __init sev_map_percpu_data(void) { int cpu; - if (!sev_active()) + if (!prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) return; for_each_possible_cpu(cpu) { diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index ad273e5861c1..f7ba78a23dcd 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -16,9 +16,9 @@ #include #include #include +#include #include -#include #include #include @@ -232,7 +232,7 @@ static void __init kvmclock_init_mem(void) * hvclock is shared between the guest and the hypervisor, must * be mapped decrypted. 
*/ - if (sev_active()) { + if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) { r = set_memory_decrypted((unsigned long) hvclock_mem, 1UL << order); if (r) { diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 8e7b517ad738..66ff788b79c9 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -167,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) } pte = pte_offset_kernel(pmd, vaddr); - if (sev_active()) + if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) prot = PAGE_KERNEL_EXEC; set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); @@ -207,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable) level4p = (pgd_t *)__va(start_pgtable); clear_page(level4p); - if (sev_active()) { + if (prot_guest_has(PATTR_GUEST_MEM_ENCRYPT)) { info.page_flag |= _PAGE_ENC; info.kernpg_flag |= _PAGE_ENC; } @@ -570,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void) */ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { - if (sev_active()) + if (!prot_guest_has(PATTR_HOST_MEM_ENCRYPT)) return 0; /* -