Hi,
NIT: Title: please remove the full stop.
On 9/26/19 10:46 AM, hong...@amazon.com wrote:
From: Hongyan Xia <hong...@amazon.com>
Please provide a description in the commit message of what you are doing and why.
Also, IIRC, x86 always has !CONFIG_SEPARATE_XENHEAP, so can you explain
why the separate-xenheap path is also modified?
Signed-off-by: Hongyan Xia <hong...@amazon.com>
---
xen/common/page_alloc.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7cb1bd368b..4ec6299ba8 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2143,6 +2143,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
+ void *ret;
ASSERT(!in_irq());
@@ -2151,7 +2152,10 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
if ( unlikely(pg == NULL) )
return NULL;
- memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
+ ret = page_to_virt(pg);
+ memguard_unguard_range(ret, 1 << (order + PAGE_SHIFT));
+ map_pages_to_xen((unsigned long)ret, page_to_mfn(pg),
+ 1UL << order, PAGE_HYPERVISOR);
As mentioned earlier on, for Arm the xenheap will always be mapped. So
unless you plan to tackle the Arm side as well, we should make sure
that the behavior is not changed for Arm.
It feels to me we want to introduce a new Kconfig option, selected by
x86, to tell whether the direct map is always mapped. I would then
implement, maybe in xen/mm.h, two stubs (one for when the config is
selected, the other for when it is not).
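Roughly something like the sketch below (only to illustrate the idea; the
Kconfig name and the helper names are placeholders I am making up here, not
existing symbols, and would need bikeshedding):

/* xen/include/xen/mm.h -- sketch only, all names are placeholders */
#ifdef CONFIG_DIRECTMAP_ON_DEMAND /* would be selected by x86 */
/* The direct map may not cover xenheap pages, so map/unmap explicitly. */
static inline void xenheap_map_pages(void *v, mfn_t mfn, unsigned long nr)
{
    map_pages_to_xen((unsigned long)v, mfn, nr, PAGE_HYPERVISOR);
}

static inline void xenheap_unmap_pages(void *v, unsigned long nr)
{
    map_pages_to_xen((unsigned long)v, INVALID_MFN, nr, _PAGE_NONE);
}
#else
/* Arm & co: the xenheap is always mapped, so these are no-ops. */
static inline void xenheap_map_pages(void *v, mfn_t mfn, unsigned long nr) {}
static inline void xenheap_unmap_pages(void *v, unsigned long nr) {}
#endif

alloc_xenheap_pages()/free_xenheap_pages() could then call the helpers
unconditionally, and the Arm behavior would stay exactly as today.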
return page_to_virt(pg);
}
@@ -2165,6 +2169,8 @@ void free_xenheap_pages(void *v, unsigned int order)
return;
memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
+ ASSERT((unsigned long)v >= DIRECTMAP_VIRT_START);
This define does not exist for Arm32, so it will break the build there.
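One option (again just a sketch, assuming DIRECTMAP_VIRT_START stays
x86/Arm64-only) would be to compile the check only where the define exists,
or to move it into the x86-only helper suggested above:

#ifdef DIRECTMAP_VIRT_START
    ASSERT((unsigned long)v >= DIRECTMAP_VIRT_START);
#endif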
+ map_pages_to_xen((unsigned long)v, INVALID_MFN, 1UL << order, _PAGE_NONE);
free_heap_pages(virt_to_page(v), order, false);
}
@@ -2189,6 +2195,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
unsigned int i;
+ void *ret;
ASSERT(!in_irq());
@@ -2204,7 +2211,11 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info |= PGC_xen_heap;
- return page_to_virt(pg);
+ ret = page_to_virt(pg);
+ map_pages_to_xen((unsigned long)ret, page_to_mfn(pg),
+ 1UL << order, PAGE_HYPERVISOR);
+
+ return ret;
}
void free_xenheap_pages(void *v, unsigned int order)
@@ -2222,6 +2233,9 @@ void free_xenheap_pages(void *v, unsigned int order)
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info &= ~PGC_xen_heap;
+ ASSERT((unsigned long)v >= DIRECTMAP_VIRT_START);
+ map_pages_to_xen((unsigned long)v, INVALID_MFN, 1UL << order, _PAGE_NONE);
+
free_heap_pages(pg, order, true);
}
Cheers,
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel