TDX is going to have more than one reason to fail
enc_status_change_prepare().

Change the callback to return errno instead of assuming -EIO. Change
enc_status_change_finish() too, to keep the interface symmetric.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
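Note (not part of the commit message): a minimal sketch of how a converted
prepare callback could report distinct errnos once there is more than one
failure reason. The function name example_enc_status_change_prepare() and the
-EINVAL case are illustrative assumptions; only tdx_enc_status_changed() comes
from the code touched below.

/* Hypothetical sketch only; not part of this series. */
static int example_enc_status_change_prepare(unsigned long vaddr,
                                             int numpages, bool enc)
{
        if (numpages <= 0)
                return -EINVAL;         /* illustrative parameter check */

        /* Only handle shared->private conversion here, as TDX does. */
        if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
                return -EIO;            /* conversion rejected */

        return 0;
}
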
 arch/x86/coco/tdx/tdx.c         | 20 +++++++++++---------
 arch/x86/hyperv/ivm.c           |  9 +++------
 arch/x86/include/asm/x86_init.h |  4 ++--
 arch/x86/kernel/x86_init.c      |  4 ++--
 arch/x86/mm/mem_encrypt_amd.c   |  8 ++++----
 arch/x86/mm/pat/set_memory.c    |  9 +++++----
 6 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 1b5d17a9f70d..2d90043a0e91 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -797,28 +797,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
        return true;
 }
 
-static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
-                                         bool enc)
+static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+                                        bool enc)
 {
        /*
         * Only handle shared->private conversion here.
         * See the comment in tdx_early_init().
         */
-       if (enc)
-               return tdx_enc_status_changed(vaddr, numpages, enc);
-       return true;
+       if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+               return -EIO;
+
+       return 0;
 }
 
-static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
                                         bool enc)
 {
        /*
         * Only handle private->shared conversion here.
         * See the comment in tdx_early_init().
         */
-       if (!enc)
-               return tdx_enc_status_changed(vaddr, numpages, enc);
-       return true;
+       if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+               return -EIO;
+
+       return 0;
 }
 
 void __init tdx_early_init(void)
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 02e55237d919..2e1be1afeebe 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -510,13 +510,12 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
  * with host. This function works as wrap of hv_mark_gpa_visibility()
  * with memory base and size.
  */
-static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
 {
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
        int ret = 0;
-       bool result = true;
        int i, pfn;
 
        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
@@ -530,17 +529,15 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
                        ret = hv_mark_gpa_visibility(pfn, pfn_array,
                                                     visibility);
-                       if (ret) {
-                               result = false;
+                       if (ret)
                                goto err_free_pfn_array;
-                       }
                        pfn = 0;
                }
        }
 
  err_free_pfn_array:
        kfree(pfn_array);
-       return result;
+       return ret;
 }
 
 static bool hv_vtom_tlb_flush_required(bool private)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index c878616a18b8..c9503fe2d13a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -150,8 +150,8 @@ struct x86_init_acpi {
  * @enc_cache_flush_required   Returns true if a cache flush is needed before changing page encryption status
  */
 struct x86_guest {
-       bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
-       bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+       int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+       int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
        bool (*enc_tlb_flush_required)(bool enc);
        bool (*enc_cache_flush_required)(void);
 };
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a37ebd3b4773..f0f54e109eb9 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -131,8 +131,8 @@ struct x86_cpuinit_ops x86_cpuinit = {
 
 static void default_nmi_init(void) { };
 
-static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
-static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
+static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
 static bool is_private_mmio_noop(u64 addr) {return false; }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index a68f2dda0948..6cf6cc8ae6a6 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -282,7 +282,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 #endif
 }
 
-static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
        /*
         * To maintain the security guarantees of SEV-SNP guests, make sure
@@ -291,11 +291,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
        if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
                snp_set_memory_shared(vaddr, npages);
 
-       return true;
+       return 0;
 }
 
 /* Return true unconditionally: return value doesn't matter for the SEV side */
-static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
        /*
         * After memory is mapped encrypted in the page table, validate it
@@ -307,7 +307,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
-       return true;
+       return 0;
 }
 
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index bda9f129835e..6fbf22d5fa56 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2152,8 +2152,9 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
                cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
 
        /* Notify hypervisor that we are about to set/clr encryption attribute. */
-       if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
-               return -EIO;
+       ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+       if (ret)
+               return ret;
 
        ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2168,8 +2169,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 
        /* Notify hypervisor that we have successfully set/clr encryption attribute. */
        if (!ret) {
-               if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
-                       ret = -EIO;
+               ret = x86_platform.guest.enc_status_change_finish(addr,
+                                                                 numpages, enc);
        }
 
        return ret;
-- 
2.41.0

