Hi,
From the title, this patch is doing two things:
1) Map bootmem_region_list
2) Fix double unmap bug
It is not entirely clear how the latter is related to the former. Can
you explain it?
On 9/26/19 10:46 AM, hong...@amazon.com wrote:
From: Hongyan Xia <hong...@amazon.com>
Please provide a commit message description.
Signed-off-by: Hongyan Xia <hong...@amazon.com>
---
xen/arch/x86/pv/dom0_build.c | 2 +-
xen/common/page_alloc.c | 12 ++++++++++--
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 202edcaa17..1555a61b84 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -236,7 +236,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
if ( pl3e )
unmap_domain_page(pl3e);
- unmap_domain_page(l4start);
+ //unmap_domain_page(l4start);
I guess you wanted to remove it completely rather than just comment it out?
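In other words, the hunk should simply drop the line:

-    unmap_domain_page(l4start);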
}
static struct page_info * __init alloc_chunk(struct domain *d,
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index deeeac065c..6acc1c78a4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -247,6 +247,7 @@ mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER;
static struct bootmem_region {
unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
} *__initdata bootmem_region_list;
+struct page_info *bootmem_region_list_pg;
I guess this should be static. But...
static unsigned int __initdata nr_bootmem_regions;
struct scrub_region {
@@ -264,7 +265,11 @@ static void __init bootmem_region_add(unsigned long s, unsigned long e)
unsigned int i;
if ( (bootmem_region_list == NULL) && (s < e) )
- bootmem_region_list = mfn_to_virt(s++);
+ {
+ bootmem_region_list_pg = mfn_to_page(_mfn(s));
... at least on Arm, the frametable is allocated after the boot
allocator has been initialized. So mfn_to_page() will not work properly
here.
+ bootmem_region_list = map_domain_page(_mfn(s));
So I would suggest looking into statically allocating bootmem_region_list instead. This was actually discussed recently as part of an on-going problem with Arm32 (see [1]).
I am planning to have a look after I finish some important bug fixes for Xen 4.13, but feel free to tackle it first.
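Roughly what I have in mind (completely untested sketch; sizing the array to one page is only meant to match the single page the list currently borrows from the boot allocator):

static struct bootmem_region {
    unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
} __initdata bootmem_region_list[PAGE_SIZE / sizeof(struct bootmem_region)];

bootmem_region_add() would then only need a bound check against ARRAY_SIZE(bootmem_region_list) rather than lazily grabbing a page, and end_boot_allocator() would no longer have to hand the page back (or unmap anything) for the list itself.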
+ s++;
+ }
if ( s >= e )
return;
@@ -1869,7 +1874,10 @@ void __init end_boot_allocator(void)
init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
}
nr_bootmem_regions = 0;
- init_heap_pages(virt_to_page(bootmem_region_list), 1);
+ init_heap_pages(bootmem_region_list_pg, 1);
+ /* Remember to discard the mapping for bootmem_region_list. */
+ unmap_domain_page(bootmem_region_list);
+ flush_tlb_one_local(bootmem_region_list);
if ( !dma_bitsize && (num_online_nodes() > 1) )
dma_bitsize = arch_get_dma_bitsize();
Cheers,
[1] https://lists.xen.org/archives/html/xen-devel/2019-09/msg01407.html
--
Julien Grall