From dadb5b4b21c904ce59024c686eb1c55be8f46c52 Mon Sep 17 00:00:00 2001
From: Julien Grall <jgrall@amazon.com>
Date: Thu, 21 Jan 2021 10:16:08 +0000
Subject: [PATCH] xen/page_alloc: Only flush pages to RAM once we know they
 are scrubbed

At the moment, each page is flushed to RAM just after the allocator
has found some free pages. However, this happens before checking
whether the pages have been scrubbed.

As a consequence, on Arm, a guest may be able to access the old
content of scrubbed pages if it has its cache disabled (the default at
boot) and the scrubbed content has not yet reached the Point of
Coherency.
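
Schematically, the problematic ordering was as follows (a simplified
sketch mirroring the hunk removed below; page initialisation and the
later scrubbing are shown only as placeholder comments):

    spin_lock(&heap_lock);
    for ( i = 0; i < (1U << order); i++ )
    {
        /* ... page initialisation ... */

        /* Flushed here, while the page may still hold stale data. */
        flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
                          !(memflags & MEMF_no_icache_flush));
    }
    spin_unlock(&heap_lock);

    /*
     * Scrubbing happens only after this point, so the zeroed data may
     * stay in the cache and never reach RAM before the guest runs.
     */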

The flush is now moved to after the point where we know the contents
of the pages will no longer change. This also has the benefit of
reducing the amount of work done with the heap_lock held.
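
The resulting ordering is roughly the following (again a sketch; the
scrubbing and TLB flush steps in between are elided):

    spin_unlock(&heap_lock);

    /* ... scrub dirty pages, issue any pending TLB flush ... */

    /* Flush only now, once the contents can no longer change. */
    mfn = page_to_mfn(pg);
    for ( i = 0; i < (1U << order); i++ )
        flush_page_to_ram(mfn_x(mfn) + i, !(memflags & MEMF_no_icache_flush));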

This is XSA-364.

Fixes: 307c3be3ccb2 ("mm: Don't scrub pages while holding heap lock in alloc_heap_pages()")
Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/common/page_alloc.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 02ac1fa613e7..1744e6faa5c4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -924,6 +924,7 @@ static struct page_info *alloc_heap_pages(
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
     unsigned int dirty_cnt = 0;
+    mfn_t mfn;
 
     /* Make sure there are enough bits in memflags for nodeID. */
     BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
@@ -1022,11 +1023,6 @@ static struct page_info *alloc_heap_pages(
         pg[i].u.inuse.type_info = 0;
         page_set_owner(&pg[i], NULL);
 
-        /* Ensure cache and RAM are consistent for platforms where the
-         * guest can control its own visibility of/through the cache.
-         */
-        flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
-                          !(memflags & MEMF_no_icache_flush));
     }
 
     spin_unlock(&heap_lock);
@@ -1062,6 +1058,14 @@ static struct page_info *alloc_heap_pages(
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
 
+    /*
+     * Ensure cache and RAM are consistent for platforms where the guest
+     * can control its own visibility of/through the cache.
+     */
+    mfn = page_to_mfn(pg);
+    for ( i = 0; i < (1U << order); i++ )
+        flush_page_to_ram(mfn_x(mfn) + i, !(memflags & MEMF_no_icache_flush));
+
     return pg;
 }
 
-- 
2.17.1

