Since cpa->vaddr is invariant, this means we can remove all
workarounds that deal with it changing.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/mm/pageattr-test.c |    7 ++-----
 arch/x86/mm/pageattr.c      |   13 ++++---------
 2 files changed, 6 insertions(+), 14 deletions(-)

--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -124,7 +124,6 @@ static int pageattr_test(void)
        unsigned int level;
        int i, k;
        int err;
-       unsigned long test_addr;
 
        if (print)
                printk(KERN_INFO "CPA self-test:\n");
@@ -181,8 +180,7 @@ static int pageattr_test(void)
 
                switch (i % 3) {
                case 0:
-                       test_addr = addr[i];
-                       err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
+                       err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
                        break;
 
                case 1:
@@ -226,8 +224,7 @@ static int pageattr_test(void)
                        failed++;
                        continue;
                }
-               test_addr = addr[i];
-               err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
+               err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
                if (err < 0) {
                        printk(KERN_ERR "CPA reverting failed: %d\n", err);
                        failed++;
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
-       unsigned long addr_copy = addr;
 
        ret = change_page_attr_set(&addr, numpages,
                                   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                   0);
        if (!ret) {
-               ret = change_page_attr_set_clr(&addr_copy, numpages,
-                                              cachemode2pgprot(
-                                               _PAGE_CACHE_MODE_WC),
+               ret = change_page_attr_set_clr(&addr, numpages,
+                                              cachemode2pgprot(_PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
        struct cpa_data cpa;
-       unsigned long start;
        int ret;
 
        /* Nothing to do if memory encryption is not active */
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned
        if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
                addr &= PAGE_MASK;
 
-       start = addr;
-
        memset(&cpa, 0, sizeof(cpa));
        cpa.vaddr = &addr;
        cpa.numpages = numpages;
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned
        /*
         * Before changing the encryption attribute, we need to flush caches.
         */
-       cpa_flush_range(start, numpages, 1);
+       cpa_flush_range(addr, numpages, 1);
 
        ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned
         * in case TLB flushing gets optimized in the cpa_flush_range()
         * path use the same logic as above.
         */
-       cpa_flush_range(start, numpages, 0);
+       cpa_flush_range(addr, numpages, 0);
 
        return ret;
 }


Reply via email to