On 10.03.21 20:37, Igor Druzhinin wrote:
On 30.01.21 19:53, Roman Shaposhnik wrote:
On Fri, Jan 29, 2021 at 11:28 PM Jürgen Groß <[email protected]> wrote:

On 29.01.21 21:12, Roman Shaposhnik wrote:
Hi!

I'm trying to see how much mileage I can get out of
crash(1) 7.2.8 (based on gdb 7.6) when it comes to analyzing crash
dumps taken via xl dump-core (this is all on x86_64 with stock Xen
v. 4.14).
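
(For reference, the dumps were taken and loaded roughly like this -- the
guest name and file paths below are just placeholders:

# xl dump-core guest-domU /var/tmp/guest-domU.core
# crash /path/to/vmlinux-with-debuginfo /var/tmp/guest-domU.core
)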

The good news is that the image actually does load up but it throws
the following WARNINGs in the process:

WARNING: cannot access vmalloc'd module memory
crash: read error: kernel virtual address: ffffffff93613480  type:
"fill_task_struct"
WARNING: active task ffffffff93613480 on cpu 0 not found in PID hash
crash: read error: kernel virtual address: ffffffff93613480  type:
"fill_task_struct"
WARNING: cannot read log_buf contents

And then the info it gives me on basic things like ps, mod,
log, etc. is really limited (and I now suspect it may even
be wrong).

Since I was primarily after dmesg/log initially, I tried:
crash> log
log: WARNING: cannot read log_buf contents

Then I tried taking an xl dump-core of a domU that was still
very much alive and happy and got similar results -- so it clearly
doesn't seem to be related to the state the domU is in.

As a matter of fact, I actually got to the desired dmesg output by
simply running strings(1) on the core file -- so the info is
definitely there -- but I guess some kind of index/reference may be
broken.
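
(Concretely, that was just something along the lines of the following --
the core file name is again a placeholder -- and then searching the
output for known dmesg lines:

# strings /var/tmp/guest-domU.core | less
)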

With all that in mind, if there's anyone on this ML who has recently
done Xen DomU crash dump analysis -- I would definitely appreciate
the pointers!

For me it just works (openSUSE).

Can you please run:

crash --version and readelf -a XXXX (on the xl dump-core output) and
post the results?

# crash --version

crash 7.2.1

I tried to build this version, but I still get the following while
trying to open a dump file produced by "xl dump-core":

[root@lcy2-dt92 crash]# ./crash ../vmlinux-5.8.0-44-generic ../xxx.dmp

crash 7.2.1
Copyright (C) 2002-2017  Red Hat, Inc.
Copyright (C) 2004, 2005, 2006, 2010  IBM Corporation
Copyright (C) 1999-2006  Hewlett-Packard Co
Copyright (C) 2005, 2006, 2011, 2012  Fujitsu Limited
Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
Copyright (C) 2005, 2011  NEC Corporation
Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
This program is free software, covered by the GNU General Public License,
and you are welcome to change it and/or distribute copies of it under
certain conditions.  Enter "help copying" to see the conditions.
This program has absolutely no warranty.  Enter "help warranty" for details.

GNU gdb (GDB) 7.6
Copyright (C) 2013 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.  Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-unknown-linux-gnu"...

crash: cannot determine base kernel version
crash: ../vmlinux-5.8.0-44-generic and ../xxx.dmp do not match!


Am I doing something wrong? How do I need to use crash to make it work?

Could you try the attached patch?


Juergen

From e7265739e0db957176261236be53c0c270c2efa1 Mon Sep 17 00:00:00 2001
From: Juergen Gross <[email protected]>
Date: Thu, 18 Mar 2021 14:26:24 +0100
Subject: [PATCH] tools/libs/ctrl: fix xc_core_arch_map_p2m() to support linear
 p2m table

The core of a pv linux guest produced via "xl dump-core" is not usable
since kernel 4.14, as only the linear p2m table is kept if Xen indicates
it is supporting that. Unfortunately xc_core_arch_map_p2m() is still
supporting the 3-level p2m tree only.

Fix that by copying the functionality of map_p2m() from libxenguest to
libxenctrl.

Fixes: dc6d60937121 ("libxc: set flag for support of linear p2m list in domain builder")
Signed-off-by: Juergen Gross <[email protected]>
---
This is a backport candidate.
---
 tools/libs/ctrl/xc_core_x86.c | 235 ++++++++++++++++++++++++++++++----
 tools/libs/ctrl/xc_private.h  |   1 +
 2 files changed, 208 insertions(+), 28 deletions(-)

diff --git a/tools/libs/ctrl/xc_core_x86.c b/tools/libs/ctrl/xc_core_x86.c
index cb76e6207b..a8806efb4a 100644
--- a/tools/libs/ctrl/xc_core_x86.c
+++ b/tools/libs/ctrl/xc_core_x86.c
@@ -17,6 +17,7 @@
  *
  */
 
+#include <inttypes.h>
 #include "xc_private.h"
 #include "xc_core.h"
 #include <xen/hvm/e820.h>
@@ -65,34 +66,169 @@ xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unus
     return 0;
 }
 
-static int
-xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
-                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                        unsigned long *pfnp, int rw)
+static inline bool is_canonical_address(uint64_t vaddr)
 {
-    /* Double and single indirect references to the live P2M table */
-    xen_pfn_t *live_p2m_frame_list_list = NULL;
-    xen_pfn_t *live_p2m_frame_list = NULL;
-    /* Copies of the above. */
-    xen_pfn_t *p2m_frame_list_list = NULL;
-    xen_pfn_t *p2m_frame_list = NULL;
+    return ((int64_t)vaddr >> 47) == ((int64_t)vaddr >> 63);
+}
 
-    uint32_t dom = info->domid;
-    int ret = -1;
-    int err;
-    int i;
+/* Virtual address ranges reserved for hypervisor. */
+#define HYPERVISOR_VIRT_START_X86_64 0xFFFF800000000000ULL
+#define HYPERVISOR_VIRT_END_X86_64   0xFFFF87FFFFFFFFFFULL
 
-    if ( xc_domain_nr_gpfns(xch, info->domid, &dinfo->p2m_size) < 0 )
+#define HYPERVISOR_VIRT_START_X86_32 0x00000000F5800000ULL
+#define HYPERVISOR_VIRT_END_X86_32   0x00000000FFFFFFFFULL
+
+static xen_pfn_t *
+xc_core_arch_map_p2m_list_rw(xc_interface *xch, struct domain_info_context *dinfo,
+                             uint32_t dom, shared_info_any_t *live_shinfo,
+                             uint64_t p2m_cr3)
+{
+    uint64_t p2m_vaddr, p2m_end, mask, off;
+    xen_pfn_t p2m_mfn, mfn, saved_mfn, max_pfn;
+    uint64_t *ptes = NULL;
+    xen_pfn_t *mfns = NULL;
+    unsigned int fpp, n_pages, level, n_levels, shift,
+                 idx_start, idx_end, idx, saved_idx;
+
+    p2m_vaddr = GET_FIELD(live_shinfo, arch.p2m_vaddr, dinfo->guest_width);
+    fpp = PAGE_SIZE / dinfo->guest_width;
+    dinfo->p2m_frames = (dinfo->p2m_size - 1) / fpp + 1;
+    p2m_end = p2m_vaddr + dinfo->p2m_frames * PAGE_SIZE - 1;
+
+    if ( dinfo->guest_width == 8 )
     {
-        ERROR("Could not get maximum GPFN!");
-        goto out;
+        mask = 0x0000ffffffffffffULL;
+        n_levels = 4;
+        p2m_mfn = p2m_cr3 >> 12;
+        if ( !is_canonical_address(p2m_vaddr) ||
+             !is_canonical_address(p2m_end) ||
+             p2m_end < p2m_vaddr ||
+             (p2m_vaddr <= HYPERVISOR_VIRT_END_X86_64 &&
+              p2m_end > HYPERVISOR_VIRT_START_X86_64) )
+        {
+            ERROR("Bad virtual p2m address range %#" PRIx64 "-%#" PRIx64,
+                  p2m_vaddr, p2m_end);
+            errno = ERANGE;
+            goto out;
+        }
+    }
+    else
+    {
+        mask = 0x00000000ffffffffULL;
+        n_levels = 3;
+        if ( p2m_cr3 & ~mask )
+            p2m_mfn = ~0UL;
+        else
+            p2m_mfn = (uint32_t)((p2m_cr3 >> 12) | (p2m_cr3 << 20));
+        if ( p2m_vaddr > mask || p2m_end > mask || p2m_end < p2m_vaddr ||
+             (p2m_vaddr <= HYPERVISOR_VIRT_END_X86_32 &&
+              p2m_end > HYPERVISOR_VIRT_START_X86_32) )
+        {
+            ERROR("Bad virtual p2m address range %#" PRIx64 "-%#" PRIx64,
+                  p2m_vaddr, p2m_end);
+            errno = ERANGE;
+            goto out;
+        }
     }
 
-    if ( dinfo->p2m_size < info->nr_pages  )
+    mfns = malloc(sizeof(*mfns));
+    if ( !mfns )
     {
-        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
+        ERROR("Cannot allocate memory for array of %u mfns", 1);
         goto out;
     }
+    mfns[0] = p2m_mfn;
+    off = 0;
+    saved_mfn = 0;
+    idx_start = idx_end = saved_idx = 0;
+
+    for ( level = n_levels; level > 0; level-- )
+    {
+        n_pages = idx_end - idx_start + 1;
+        ptes = xc_map_foreign_pages(xch, dom, PROT_READ, mfns, n_pages);
+        if ( !ptes )
+        {
+            PERROR("Failed to map %u page table pages for p2m list", n_pages);
+            goto out;
+        }
+        free(mfns);
+
+        shift = level * 9 + 3;
+        idx_start = ((p2m_vaddr - off) & mask) >> shift;
+        idx_end = ((p2m_end - off) & mask) >> shift;
+        idx = idx_end - idx_start + 1;
+        mfns = malloc(sizeof(*mfns) * idx);
+        if ( !mfns )
+        {
+            ERROR("Cannot allocate memory for array of %u mfns", idx);
+            goto out;
+        }
+
+        for ( idx = idx_start; idx <= idx_end; idx++ )
+        {
+            mfn = (ptes[idx] & 0x000ffffffffff000ULL) >> PAGE_SHIFT;
+            if ( mfn == 0 )
+            {
+                ERROR("Bad mfn %#lx during page table walk for vaddr %#" PRIx64 " at level %d of p2m list",
+                      mfn, off + ((uint64_t)idx << shift), level);
+                errno = ERANGE;
+                goto out;
+            }
+            mfns[idx - idx_start] = mfn;
+
+            /* Maximum pfn check at level 2. Same reasoning as for p2m tree. */
+            if ( level == 2 )
+            {
+                if ( mfn != saved_mfn )
+                {
+                    saved_mfn = mfn;
+                    saved_idx = idx - idx_start;
+                }
+            }
+        }
+
+        if ( level == 2 )
+        {
+            if ( saved_idx == idx_end )
+                saved_idx++;
+            max_pfn = ((xen_pfn_t)saved_idx << 9) * fpp;
+            if ( max_pfn < dinfo->p2m_size )
+            {
+                dinfo->p2m_size = max_pfn;
+                dinfo->p2m_frames = (dinfo->p2m_size + fpp - 1) / fpp;
+                p2m_end = p2m_vaddr + dinfo->p2m_frames * PAGE_SIZE - 1;
+                idx_end = idx_start + saved_idx;
+            }
+        }
+
+        munmap(ptes, n_pages * PAGE_SIZE);
+        ptes = NULL;
+        off = p2m_vaddr & ((mask >> shift) << shift);
+    }
+
+    return mfns;
+
+ out:
+    free(mfns);
+    if ( ptes )
+        munmap(ptes, n_pages * PAGE_SIZE);
+
+    return NULL;
+}
+
+static xen_pfn_t *
+xc_core_arch_map_p2m_tree_rw(xc_interface *xch, struct domain_info_context *dinfo,
+                             uint32_t dom, shared_info_any_t *live_shinfo)
+{
+    /* Double and single indirect references to the live P2M table */
+    xen_pfn_t *live_p2m_frame_list_list;
+    xen_pfn_t *live_p2m_frame_list = NULL;
+    /* Copies of the above. */
+    xen_pfn_t *p2m_frame_list_list = NULL;
+    xen_pfn_t *p2m_frame_list;
+
+    int err;
+    int i;
 
     live_p2m_frame_list_list =
         xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ,
@@ -151,10 +287,61 @@ xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc
         for ( i = P2M_FL_ENTRIES - 1; i >= 0; i-- )
             p2m_frame_list[i] = ((uint32_t *)p2m_frame_list)[i];
 
+    dinfo->p2m_frames = P2M_FL_ENTRIES;
+
+    return p2m_frame_list;
+
+ out:
+    err = errno;
+
+    if ( live_p2m_frame_list_list )
+        munmap(live_p2m_frame_list_list, PAGE_SIZE);
+
+    if ( live_p2m_frame_list )
+        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
+
+    free(p2m_frame_list_list);
+
+    errno = err;
+
+    return NULL;
+}
+
+static int
+xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
+                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                        unsigned long *pfnp, int rw)
+{
+    xen_pfn_t *p2m_frame_list = NULL;
+    uint64_t p2m_cr3;
+    uint32_t dom = info->domid;
+    int ret = -1;
+    int err;
+
+    if ( xc_domain_nr_gpfns(xch, info->domid, &dinfo->p2m_size) < 0 )
+    {
+        ERROR("Could not get maximum GPFN!");
+        goto out;
+    }
+
+    if ( dinfo->p2m_size < info->nr_pages  )
+    {
+        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
+        goto out;
+    }
+
+    p2m_cr3 = GET_FIELD(live_shinfo, arch.p2m_cr3, dinfo->guest_width);
+
+    p2m_frame_list = p2m_cr3 ? xc_core_arch_map_p2m_list_rw(xch, dinfo, dom, live_shinfo, p2m_cr3)
+                             : xc_core_arch_map_p2m_tree_rw(xch, dinfo, dom, live_shinfo);
+
+    if ( !p2m_frame_list )
+        goto out;
+
     *live_p2m = xc_map_foreign_pages(xch, dom,
                                     rw ? (PROT_READ | PROT_WRITE) : PROT_READ,
                                     p2m_frame_list,
-                                    P2M_FL_ENTRIES);
+                                    dinfo->p2m_frames);
 
     if ( !*live_p2m )
     {
@@ -169,14 +356,6 @@ xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc
 out:
     err = errno;
 
-    if ( live_p2m_frame_list_list )
-        munmap(live_p2m_frame_list_list, PAGE_SIZE);
-
-    if ( live_p2m_frame_list )
-        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
-
-    free(p2m_frame_list_list);
-
     free(p2m_frame_list);
 
     errno = err;
diff --git a/tools/libs/ctrl/xc_private.h b/tools/libs/ctrl/xc_private.h
index f0b5f83ac8..8ebc0b59da 100644
--- a/tools/libs/ctrl/xc_private.h
+++ b/tools/libs/ctrl/xc_private.h
@@ -79,6 +79,7 @@ struct iovec {
 
 struct domain_info_context {
     unsigned int guest_width;
+    unsigned int p2m_frames;
     unsigned long p2m_size;
 };
 
-- 
2.26.2
