Split out the allocation and free routines so that they can be reused
by a follow-up set of patches (for L1D flushing).

Signed-off-by: Balbir Singh <sbl...@amazon.com>
Reviewed-by: Kees Cook <keesc...@chromium.org>
---
 arch/x86/include/asm/cacheflush.h |  3 +++
 arch/x86/kernel/Makefile          |  1 +
 arch/x86/kernel/l1d_flush.c       | 36 +++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c            | 25 +++------------------
 4 files changed, 43 insertions(+), 22 deletions(-)
 create mode 100644 arch/x86/kernel/l1d_flush.c
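
For reviewers, a usage illustration (not part of the patch): a minimal
sketch of how a follow-up caller might adopt the new helpers, mirroring
the vmx_setup_l1d_flush()/vmx_cleanup_l1d_flush() changes below. The
example_* names and the static flush_pages pointer are hypothetical.

    #include <linux/errno.h>
    #include <asm/cacheflush.h>

    static void *flush_pages;

    /* Allocate the L1D flush pages once during setup. */
    static int example_setup(void)
    {
            flush_pages = l1d_flush_alloc_pages();
            if (!flush_pages)
                    return -ENOMEM;
            return 0;
    }

    /* Free the pages at teardown and clear the stale pointer. */
    static void example_teardown(void)
    {
            if (flush_pages) {
                    l1d_flush_cleanup_pages(flush_pages);
                    flush_pages = NULL;
            }
    }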

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2a5f93..bac56fcd9790 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -6,6 +6,9 @@
 #include <asm-generic/cacheflush.h>
 #include <asm/special_insns.h>
 
+#define L1D_CACHE_ORDER 4
 void clflush_cache_range(void *addr, unsigned int size);
+void *l1d_flush_alloc_pages(void);
+void l1d_flush_cleanup_pages(void *l1d_flush_pages);
 
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e77261db2391..c17c1e3c1a0b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -157,3 +157,4 @@ ifeq ($(CONFIG_X86_64),y)
 endif
 
 obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT)   += ima_arch.o
+obj-y                                          += l1d_flush.o
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
new file mode 100644
index 000000000000..d605878c8f28
--- /dev/null
+++ b/arch/x86/kernel/l1d_flush.c
@@ -0,0 +1,36 @@
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+
+void *l1d_flush_alloc_pages(void)
+{
+       struct page *page;
+       void *l1d_flush_pages = NULL;
+       int i;
+
+       /*
+        * This allocation for l1d_flush_pages is not tied to a VM/task's
+        * lifetime and so should not be charged to a memcg.
+        */
+       page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+       if (!page)
+               return NULL;
+       l1d_flush_pages = page_address(page);
+
+       /*
+        * Initialize each page with a different pattern in
+        * order to protect against KSM in the nested
+        * virtualization case.
+        */
+       for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+               memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
+                               PAGE_SIZE);
+       }
+       return l1d_flush_pages;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);
+
+void l1d_flush_cleanup_pages(void *l1d_flush_pages)
+{
+       free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
+}
+EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 00d31a5e0089..f35654db904a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,14 +203,10 @@ static const struct {
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
 };
 
-#define L1D_CACHE_ORDER 4
 static void *vmx_l1d_flush_pages;
 
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
-       struct page *page;
-       unsigned int i;
-
        if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
                return 0;
@@ -253,24 +249,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 
        if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
            !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-               /*
-                * This allocation for vmx_l1d_flush_pages is not tied to a VM
-                * lifetime and so should not be charged to a memcg.
-                */
-               page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-               if (!page)
+               vmx_l1d_flush_pages = l1d_flush_alloc_pages();
+               if (!vmx_l1d_flush_pages)
                        return -ENOMEM;
-               vmx_l1d_flush_pages = page_address(page);
-
-               /*
-                * Initialize each page with a different pattern in
-                * order to protect against KSM in the nested
-                * virtualization case.
-                */
-               for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
-                       memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
-                              PAGE_SIZE);
-               }
        }
 
        l1tf_vmx_mitigation = l1tf;
@@ -8099,7 +8080,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 static void vmx_cleanup_l1d_flush(void)
 {
        if (vmx_l1d_flush_pages) {
-               free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+               l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
                vmx_l1d_flush_pages = NULL;
        }
        /* Restore state so sysfs ignores VMX */
-- 
2.17.1
