Hi Penny,
On 10/05/2022 03:27, Penny Zheng wrote:
When static domain populates memory through populate_physmap on runtime,
Typo: s/when static/when a static/ or "when static domains populate"
s/on runtime/at runtime/
other than allocating from heap, it shall retrieve reserved pages from
I am not sure I understand the part before the comma. But it doesn't
sound necessary, so maybe drop it?
resv_page_list to make sure that guest RAM is still restricted in statically
configured memory regions. And this commit introduces a new helper
acquire_reserved_page to make it work.
Signed-off-by: Penny Zheng <penny.zh...@arm.com>
[...]
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 290526adaf..06e7037a28 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2740,8 +2740,8 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
* Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
* then assign them to one specific domain #d.
*/
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
- unsigned int nr_mfns, unsigned int memflags)
+int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
+ unsigned int memflags)
{
struct page_info *pg;
@@ -2769,12 +2769,43 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
return 0;
}
+
+/*
+ * Acquire a page from reserved page list(resv_page_list), when populating
+ * memory for static domain on runtime.
+ */
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+ struct page_info *page;
+ mfn_t smfn;
+
+ /* Acquire a page from reserved page list(resv_page_list). */
+ page = page_list_remove_head(&d->resv_page_list);
Alloc/free of memory can happen concurrently. So access to resv_page_list
needs to be protected with a spinlock (most likely d->page_alloc_lock).
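Something like the below (untested sketch, assuming d->page_alloc_lock ends
up being the lock used):

    spin_lock(&d->page_alloc_lock);
    page = page_list_remove_head(&d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);

The error path further down would need to take the same lock around the
page_list_add_tail() when putting the page back on resv_page_list.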
+ if ( unlikely(!page) )
+ return INVALID_MFN;
+
+ smfn = page_to_mfn(page);
+
+ if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
I am OK if we call acquire_domstatic_pages() for now. But long term, I
think we should consider optimizing it because we know the page is
valid and belongs to the guest. So there is a lot of pointless work
(checking mfn_valid(), scrubbing in the free part, cleaning the cache...).
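Long term I could imagine something like the below (the helper name is
entirely made up, just to illustrate the kind of fast path I have in mind):

    /*
     * Hypothetical helper: same job as acquire_domstatic_pages(), but
     * skipping the mfn_valid() loop, the scrubbing and the cache clean,
     * because the page comes straight from d->resv_page_list and is
     * therefore known to be a valid static page of this domain.
     */
    if ( acquire_domstatic_reserved_page(d, page, memflags) )
    {
        page_list_add_tail(page, &d->resv_page_list);
        return INVALID_MFN;
    }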
+ {
+ page_list_add_tail(page, &d->resv_page_list);
+ return INVALID_MFN;
+ }
+
+ return smfn;
+}
#else
void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
bool need_scrub)
{
ASSERT_UNREACHABLE();
}
+
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+ ASSERT_UNREACHABLE();
+ return INVALID_MFN;
+}
#endif
/*
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 35dc7143a4..c613afa57e 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -38,6 +38,10 @@ void arch_get_domain_info(const struct domain *d,
#define CDF_staticmem (1U << 2)
#endif
+#ifndef is_domain_using_staticmem
+#define is_domain_using_staticmem(d) ((void)(d), false)
+#endif
+
/*
* Arch-specifics.
*/
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9fd95deaec..74810e1f54 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -92,6 +92,7 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
unsigned int memflags);
#endif
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(
Cheers,
--
Julien Grall