Re: [Xen-devel] [PATCH] xen: Add support for dom0 with Linux kernel 3.19 and newer

2016-01-22 Thread David Vrabel
On 21/01/16 20:13, Daniel Kiper wrote:
> Linux kernel commit 054954eb051f35e74b75a566a96fe756015352c8
> (xen: switch to linear virtual mapped sparse p2m list), which
> appeared in 3.19, introduced a linear, virtually mapped, sparse
> p2m list. If readmem() reads the p2m, it accesses this list using
> physical addresses. Sadly, virtual-to-physical address translation
> in crash requires access to the p2m list, so we have a
> chicken-and-egg problem. In general this issue must be solved by
> introducing changes in libxl, the Linux kernel and crash (I have
> added this task to my long TODO list). However, in the dom0 case
> we can use crash_xen_info_t.dom0_pfn_to_mfn_frame_list_list, which
> is available out of the box. So, let's use it and make at least
> some users happy.

I'm confused.  How does a virtual address to (pseudo-)physical address
lookup require access to the p2m?  Surely this is a walk of the page
tables followed by an M2P lookup on the MFN in the L1 PTE?
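
For reference, the two-step translation described above can be sketched as
below; this is a minimal, illustrative example, and pte_for_vaddr() and
m2p_lookup() are hypothetical placeholders for the page-table walk and the
machine-to-physical array lookup, not actual crash functions:

/*
 * Minimal sketch of the translation described above, assuming x86-64
 * paging: walk the page tables to get the machine frame number (MFN)
 * from the L1 PTE, then use the M2P array to turn it into a
 * pseudo-physical frame number (PFN).
 */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PTE_PFN_MASK    0x000ffffffffff000UL    /* frame bits in an x86-64 PTE */

extern unsigned long pte_for_vaddr(unsigned long vaddr);  /* hypothetical page-table walk */
extern unsigned long m2p_lookup(unsigned long mfn);       /* hypothetical M2P array lookup */

static unsigned long virt_to_pseudophys(unsigned long vaddr)
{
        unsigned long pte = pte_for_vaddr(vaddr);                 /* L1 PTE */
        unsigned long mfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;   /* machine frame */
        unsigned long pfn = m2p_lookup(mfn);                      /* machine -> pseudo-physical */

        return (pfn << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);        /* re-attach page offset */
}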

David



Re: [Xen-devel] [PATCH] xen: Add support for dom0 with Linux kernel 3.19 and newer

2016-01-22 Thread Daniel Kiper
On Fri, Jan 22, 2016 at 10:03:34AM +, David Vrabel wrote:
> On 21/01/16 20:13, Daniel Kiper wrote:
> > Linux kernel commit 054954eb051f35e74b75a566a96fe756015352c8
> > (xen: switch to linear virtual mapped sparse p2m list), which
> > appeared in 3.19, introduced a linear, virtually mapped, sparse
> > p2m list. If readmem() reads the p2m, it accesses this list using
> > physical addresses. Sadly, virtual-to-physical address translation
> > in crash requires access to the p2m list, so we have a
> > chicken-and-egg problem. In general this issue must be solved by
> > introducing changes in libxl, the Linux kernel and crash (I have
> > added this task to my long TODO list). However, in the dom0 case
> > we can use crash_xen_info_t.dom0_pfn_to_mfn_frame_list_list, which
> > is available out of the box. So, let's use it and make at least
> > some users happy.
>
> I'm confused.  How does a virtual address to (pseudo-)physical address
> lookup require access to the p2m?  Surely this is a walk of the page
> tables followed by an M2P lookup on the MFN in the L1 PTE?

Correct, but crash does it a bit backward and scans the p2m. I am not sure
why; I am going to look at it while working on PV guest support.
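
For illustration, the "backward" lookup boils down to scanning a page of
p2m entries for the MFN and deriving the PFN from the matching index. A
minimal sketch with simplified names, not actual crash code:

/*
 * Illustrative sketch of the reverse lookup crash performs in __xen_m2p():
 * given an MFN, scan one page worth of p2m entries; if entry i matches,
 * the PFN is base_pfn + i.
 */
#include <stddef.h>

#define P2M_ENTRIES_PER_PAGE    (4096 / sizeof(unsigned long))

static long pfn_from_p2m_page(const unsigned long *p2m_page,
                              unsigned long base_pfn, unsigned long mfn)
{
        size_t i;

        for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
                if (p2m_page[i] == mfn)
                        return (long)(base_pfn + i);    /* found: this index is the PFN */

        return -1;      /* MFN not mapped by this p2m page */
}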

Daniel



[Xen-devel] [PATCH] xen: Add support for dom0 with Linux kernel 3.19 and newer

2016-01-21 Thread Daniel Kiper
Linux kernel commit 054954eb051f35e74b75a566a96fe756015352c8
(xen: switch to linear virtual mapped sparse p2m list), which
appeared in 3.19, introduced a linear, virtually mapped, sparse
p2m list. If readmem() reads the p2m, it accesses this list using
physical addresses. Sadly, virtual-to-physical address translation
in crash requires access to the p2m list, so we have a
chicken-and-egg problem. In general this issue must be solved by
introducing changes in libxl, the Linux kernel and crash (I have
added this task to my long TODO list). However, in the dom0 case
we can use crash_xen_info_t.dom0_pfn_to_mfn_frame_list_list, which
is available out of the box. So, let's use it and make at least
some users happy.
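
For context, dom0_pfn_to_mfn_frame_list_list is the MFN of a page of MFNs;
each of those names a frame-list page whose entries are the MFNs of the p2m
pages themselves. A conceptual sketch of locating the p2m page that covers a
given PFN follows; read_machine_page() is a hypothetical helper, not part of
crash:

/*
 * Conceptual sketch only (not crash code): dom0_pfn_to_mfn_frame_list_list
 * is the MFN of a page of MFNs; each of those names a frame-list page,
 * which in turn holds the MFNs of the p2m pages themselves.
 */
#define FRAMES_PER_PAGE (4096 / sizeof(unsigned long))

/* Hypothetical helper: read one machine page from the dump into buf. */
extern int read_machine_page(unsigned long mfn, unsigned long *buf);

/* Return the MFN of the p2m page covering pfn, or 0 on failure. */
static unsigned long p2m_page_mfn_for_pfn(unsigned long fll_mfn, unsigned long pfn)
{
        unsigned long list_list[FRAMES_PER_PAGE], list[FRAMES_PER_PAGE];
        unsigned long p2m_page_idx = pfn / FRAMES_PER_PAGE;    /* which p2m page holds pfn */

        if (!read_machine_page(fll_mfn, list_list))
                return 0;
        if (!read_machine_page(list_list[p2m_page_idx / FRAMES_PER_PAGE], list))
                return 0;

        return list[p2m_page_idx % FRAMES_PER_PAGE];
}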

Signed-off-by: Daniel Kiper 
---
 kernel.c   |   81 ++--
 xen_dom0.c |    3 ++-
 xen_dom0.h |    2 ++
 3 files changed, 77 insertions(+), 9 deletions(-)

diff --git a/kernel.c b/kernel.c
index 5ce2fb9..b07149e 100644
--- a/kernel.c
+++ b/kernel.c
@@ -17,6 +17,7 @@
 
 #include "defs.h"
 #include "xen_hyper_defs.h"
+#include "xen_dom0.h"
 #include 
 #include 
 #include 
@@ -61,6 +62,7 @@ static int restore_stack(struct bt_info *);
 static ulong __xen_m2p(ulonglong, ulong);
 static ulong __xen_pvops_m2p_l2(ulonglong, ulong);
 static ulong __xen_pvops_m2p_l3(ulonglong, ulong);
+static ulong __xen_pvops_m2p_hyper(ulonglong, ulong);
 static int search_mapping_page(ulong, ulong *, ulong *, ulong *);
 static void read_in_kernel_config_err(int, char *);
 static void BUG_bytes_init(void);
@@ -175,6 +177,9 @@ kernel_init()
 				&kt->pvops_xen.p2m_mid_missing);
 			get_symbol_data("p2m_missing", sizeof(ulong),
 				&kt->pvops_xen.p2m_missing);
+		} else if (symbol_exists("xen_p2m_addr")) {
+			if (!XEN_CORE_DUMPFILE())
+				error(FATAL, "p2m array in new format is unreadable.");
 		} else {
 			kt->pvops_xen.p2m_top_entries = get_array_length("p2m_top", NULL, 0);
 			kt->pvops_xen.p2m_top = symbol_value("p2m_top");
@@ -5850,12 +5855,14 @@ no_cpu_flags:
 	else
 		fprintf(fp, "\n");
 
-	fprintf(fp, "  pvops_xen:\n");
-	fprintf(fp, "p2m_top: %lx\n", kt->pvops_xen.p2m_top);
-	fprintf(fp, "p2m_top_entries: %d\n", kt->pvops_xen.p2m_top_entries);
-	if (symbol_exists("p2m_mid_missing"))
-		fprintf(fp, "p2m_mid_missing: %lx\n", kt->pvops_xen.p2m_mid_missing);
-	fprintf(fp, "p2m_missing: %lx\n", kt->pvops_xen.p2m_missing);
+	if (!symbol_exists("xen_p2m_addr")) {
+		fprintf(fp, "  pvops_xen:\n");
+		fprintf(fp, "p2m_top: %lx\n", kt->pvops_xen.p2m_top);
+		fprintf(fp, "p2m_top_entries: %d\n", kt->pvops_xen.p2m_top_entries);
+		if (symbol_exists("p2m_mid_missing"))
+			fprintf(fp, "p2m_mid_missing: %lx\n", kt->pvops_xen.p2m_mid_missing);
+		fprintf(fp, "p2m_missing: %lx\n", kt->pvops_xen.p2m_missing);
+	}
 }
 
 /*
@@ -8873,6 +8880,12 @@ __xen_m2p(ulonglong machine, ulong mfn)
 	ulong c, i, kmfn, mapping, p, pfn;
 	ulong start, end;
 	ulong *mp = (ulong *)kt->m2p_page;
+	int memtype;
+
+	if (XEN_CORE_DUMPFILE() && symbol_exists("xen_p2m_addr"))
+		memtype = PHYSADDR;
+	else
+		memtype = KVADDR;
 
 	/*
 	 *  Check the FIFO cache first.
@@ -8883,13 +8896,19 @@ __xen_m2p(ulonglong machine, ulong mfn)
 	    (mfn <= kt->p2m_mapping_cache[c].end))) {
 
 		if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) {
-			if (!readmem(kt->p2m_mapping_cache[c].mapping, KVADDR,
+			if (memtype == PHYSADDR)
+				pc->curcmd_flags |= XEN_MACHINE_ADDR;
+
+			if (!readmem(kt->p2m_mapping_cache[c].mapping, memtype,
 				mp, PAGESIZE(), "phys_to_machine_mapping page (cached)",
 				RETURN_ON_ERROR))
 				error(FATAL, "cannot access "
 					"phys_to_machine_mapping page\n");
 			else
 				kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping;
+
+			if (memtype == PHYSADDR)
+				pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
 		} else
 			kt->p2m_page_cache_hits++;
 
@@ -8919,11 +8938,13 @@ __xen_m2p(ulonglong machine,