Re: [Xen-devel] [RFC PATCH 81/84] x86/mm: optimise and properly unmap pages in virt_to_mfn_walk().

2019-09-26 Thread Wei Liu
On Thu, Sep 26, 2019 at 10:46:44AM +0100, hong...@amazon.com wrote:
> From: Hongyan Xia 
> 
> This also resolves a mapcache overflow bug.


This should be squashed into the patch that touched virt_to_mfn_walk.

Wei.


[Xen-devel] [RFC PATCH 81/84] x86/mm: optimise and properly unmap pages in virt_to_mfn_walk().

2019-09-26 Thread hongyax
From: Hongyan Xia 

This also resolves a mapcache overflow bug.

Signed-off-by: Hongyan Xia 
---
 xen/arch/x86/mm.c        | 57 ++++++++++++++++++++++++++++++++++++---------------------
 xen/include/asm-x86/mm.h |  3 ++-
 2 files changed, 38 insertions(+), 22 deletions(-)
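
For context on the mapcache overflow: in the pre-patch code removed below,
the intermediate mappings pl3e and pl2e were only unmapped on the
early-return superpage paths, so every walk that reached L1 leaked them,
and each virt_to_xen_lNe() helper re-mapped the levels above it as well
(see the removed FIXME). Repeated walks therefore exhausted the mapcache.
A simplified sketch of that leak pattern (illustrative only, not verbatim
Xen code; leaky_walk is a made-up name):

    unsigned long leaky_walk(void *va)
    {
        /* Each helper below maps at least one page table page. */
        l3_pgentry_t *pl3e = virt_to_xen_l3e((unsigned long)va);
        l2_pgentry_t *pl2e = virt_to_xen_l2e((unsigned long)va);
        l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)va);
        unsigned long ret = l1e_get_pfn(*pl1e);

        unmap_xen_pagetable(pl1e);  /* pl3e and pl2e are never unmapped */
        return ret;
    }

The rewrite below instead reads each level's entry, maps the next level
from it directly, and unmaps everything on a single exit path.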

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 39ba9f9bf4..f3c9042ba6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5056,42 +5056,57 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
 return pl1e;
 }
 
+/*
+ * Unlike virt_to_mfn() which just translates between the direct map and the
+ * mfn, this version actually walks the page table to find the mfn of any
+ * virtual address, as long as it is mapped. If not, INVALID_MFN is returned.
+ */
 unsigned long virt_to_mfn_walk(void *va)
 {
 unsigned long ret;
-l3_pgentry_t *pl3e;
-l2_pgentry_t *pl2e;
-l1_pgentry_t *pl1e;
-
-/*
- * FIXME: This is rather unoptimised, because e.g. virt_to_xen_l2e
- * recomputes virt_to_xen_l3e again. Clearly one can keep the result and
- * carry on.
- */
+unsigned long v = (unsigned long)va;
+l3_pgentry_t *pl3e = NULL;
+l2_pgentry_t *pl2e = NULL;
+l1_pgentry_t *pl1e = NULL;
 
-pl3e = virt_to_xen_l3e((unsigned long)(va));
-BUG_ON(!(l3e_get_flags(*pl3e) & _PAGE_PRESENT));
+pl3e = virt_to_xen_l3e(v);
+if ( !pl3e || !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+{
+ret = mfn_x(INVALID_MFN);
+goto out;
+}
 if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
 {
 ret = l3e_get_pfn(*pl3e);
-ret |= (((unsigned long)va & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
-unmap_xen_pagetable(pl3e);
-return ret;
+ret |= ((v & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+goto out;
 }
 
-pl2e = virt_to_xen_l2e((unsigned long)(va));
-BUG_ON(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT));
+pl2e = (l2_pgentry_t *)map_xen_pagetable(l3e_get_mfn(*pl3e))
++ l2_table_offset(v);
+if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
+{
+ret = mfn_x(INVALID_MFN);
+goto out;
+}
 if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
 {
 ret = l2e_get_pfn(*pl2e);
-ret |= (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
-unmap_xen_pagetable(pl2e);
-return ret;
+ret |= ((v & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+goto out;
 }
 
-pl1e = virt_to_xen_l1e((unsigned long)(va));
-BUG_ON(!(l1e_get_flags(*pl1e) & _PAGE_PRESENT));
+pl1e = (l1_pgentry_t *)map_xen_pagetable(l2e_get_mfn(*pl2e))
++ l1_table_offset(v);
+if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
+{
+ret = mfn_x(INVALID_MFN);
+goto out;
+}
 ret = l1e_get_pfn(*pl1e);
+out:
+unmap_xen_pagetable(pl3e);
+unmap_xen_pagetable(pl2e);
 unmap_xen_pagetable(pl1e);
 return ret;
 }
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e5819cbfdf..411737207f 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -645,7 +645,8 @@ void free_xen_pagetable(mfn_t mfn);
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
 unsigned long virt_to_mfn_walk(void *va);
 struct page_info *virt_to_page_walk(void *va);
-#define virt_to_maddr_walk(va) mfn_to_maddr(_mfn(virt_to_mfn_walk(va)))
+#define virt_to_maddr_walk(va) (mfn_to_maddr(_mfn(virt_to_mfn_walk(va))) | \
+((unsigned long)(va) & (PAGE_SIZE - 1)))
 
 DECLARE_PER_CPU(mfn_t, root_pgt_mfn);
 
-- 
2.17.1
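
A hypothetical caller of the reworked interface (not from this series;
the surrounding context is assumed), showing the graceful failure the
INVALID_MFN return value now permits where the old code would BUG_ON():

    unsigned long mfn = virt_to_mfn_walk(va);

    if ( mfn == mfn_x(INVALID_MFN) )
        return -EINVAL;  /* va has no present mapping at any level */

Note also that virt_to_maddr_walk() now folds the sub-page offset of va
into the returned machine address, matching what virt_to_maddr() yields
for direct map addresses.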

