vmcore_list holds memory map information from the first kernel; each
entry represents the position and size of one of the following objects:

  1) NT_PRSTATUS x the number of lcpus
  2) VMCOREINFO
  3) kernel code
  4) copy of the first 640kB memory
  5) System RAM entries

where in /proc/vmcore, 1) and 2) are visible as a single PT_NOTE
entry, and 5) as PT_LOAD entries.

These ranges are not mutually exclusive. For example, each of 1), 2) and
4) is always contained within one of the System RAM entries.

This patch adds the function oldmem_merge_vmcore_list(), which merges the
overlapping ranges represented by vmcore_list and stores the merged
result in oldmem_list.

We'll remap ranges represented by oldmem_list in direct mapping region
in the patch set that follows this patch.

Signed-off-by: HATAYAMA Daisuke <[email protected]>
---

 fs/proc/vmcore.c |   83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 83 insertions(+), 0 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 0d5071d..405b5e2 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -27,6 +27,11 @@
  */
 static LIST_HEAD(vmcore_list);
 
+/* Remap chunks of contiguous memory represented by this list in
+ * direct mapping region.
+ */
+static LIST_HEAD(oldmem_list);
+
 /* Stores the pointer to the buffer containing kernel elf core headers. */
 static char *elfcorebuf;
 static size_t elfcorebuf_sz;
@@ -137,6 +142,84 @@ static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
        return 0;
 }
 
+static struct vmcore* __init get_new_element(void);
+
+static int
+oldmem_merge_vmcore_list_one(struct vmcore *r, struct list_head *new_list)
+{
+       unsigned long m_start, m_end, n_start, n_end;
+       struct vmcore _m, *m, *n, *new;
+
+       m = &_m;
+       m->paddr = r->paddr;
+       m->size = r->size;
+       m->offset = r->offset;
+
+retry:
+       list_for_each_entry(n, new_list, list) {
+
+               m_start = m->paddr;
+               m_end = m->paddr + m->size - 1;
+
+               n_start = n->paddr;
+               n_end = n->paddr + n->size - 1;
+
+               /* not mergeable */
+               if (((m_start < n_start) && (m_end < n_start))
+                   || ((n_start < m_start) && (n_end < m_start)))
+                       continue;
+
+               /* merge n to m */
+               m->paddr = min(m->paddr, n->paddr);
+               m->size = max(m_end, n_end) - min(m_start, n_start) + 1;
+               m->offset = min(m->offset, n->offset);
+
+               /* n is no longer useful, delete it */
+               list_del(&n->list);
+               kfree(n);
+
+               goto retry;
+       }
+
+       /* there's no map in new_list to merge m, create new element */
+       new = get_new_element();
+       if (!new)
+               return -ENOMEM;
+
+       new->paddr = m->paddr;
+       new->size = m->size;
+       new->offset = m->offset;
+
+       list_add_tail(&new->list, new_list);
+
+       return 0;
+}
+
+static int
+oldmem_merge_vmcore_list(struct list_head *vc_list, struct list_head *om_list)
+{
+       struct vmcore *m;
+       int ret;
+
+       list_for_each_entry(m, vc_list, list) {
+               printk("vmcore: [mem %016llx-%016llx]\n",
+                      m->paddr, m->paddr + m->size - 1);
+       }
+
+       list_for_each_entry(m, vc_list, list) {
+               ret = oldmem_merge_vmcore_list_one(m, om_list);
+               if (ret < 0)
+                       return ret;
+       }
+
+       list_for_each_entry(m, om_list, list) {
+               printk("vmcore: [oldmem %016llx-%016llx]\n",
+                      m->paddr, m->paddr + m->size - 1);
+       }
+
+       return 0;
+}
+
 /* Read from the ELF header and then the crash dump. On error, a negative
  * value is returned; otherwise the number of bytes read is returned.
  */
  */


_______________________________________________
kexec mailing list
[email protected]
http://lists.infradead.org/mailman/listinfo/kexec

Reply via email to