Round the memory chunks referenced by PT_LOAD program header entries
to page-size boundaries in vmcore_list. Formally, for each range
[start, end], the corresponding vmcore object in vmcore_list is set up
to cover [rounddown(start, PAGE_SIZE), roundup(end, PAGE_SIZE)].
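
For illustration only (not part of the patch), here is a minimal,
self-contained sketch of how a PT_LOAD range maps onto the page-aligned
chunk registered in vmcore_list. PAGE_SIZE, the local rounddown/roundup
helpers and the sample addresses below are assumed values:

/* sketch: page-aligned chunk for one PT_LOAD entry */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define rounddown(x, y) ((x) - ((x) % (y)))
#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint64_t start = 0x1000200;        /* p_offset of a PT_LOAD entry */
	uint64_t end   = start + 0x2300;   /* p_offset + p_memsz          */

	uint64_t chunk_start = rounddown(start, PAGE_SIZE);
	uint64_t chunk_end   = roundup(end, PAGE_SIZE);

	printf("vmcore chunk: [%#llx, %#llx), size %#llx\n",
	       (unsigned long long)chunk_start,
	       (unsigned long long)chunk_end,
	       (unsigned long long)(chunk_end - chunk_start));
	return 0;
}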

This change affects the layout of /proc/vmcore. The gaps generated by
the rearrangement become newly visible to applications as holes.
Concretely, they are the two ranges [rounddown(start, PAGE_SIZE),
start] and [end, roundup(end, PAGE_SIZE)].
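
Likewise, a minimal sketch (again not part of the patch, with assumed
sample values) of the per-PT_LOAD offset bookkeeping done in the hunks
below: because the chunk in vmcore_list is page aligned, p_offset is
advanced by (paddr - start) so it keeps pointing at the first byte of
the original data rather than at the leading hole:

/* sketch: p_offset adjustment for a page-aligned chunk */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define rounddown(x, y) ((x) - ((x) % (y)))
#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint64_t vmcore_off = 0x3000;      /* running offset in /proc/vmcore */
	uint64_t paddr      = 0x1000200;   /* original p_offset              */
	uint64_t memsz      = 0x2300;      /* p_memsz                        */

	uint64_t start = rounddown(paddr, PAGE_SIZE);
	uint64_t end   = roundup(paddr + memsz, PAGE_SIZE);
	uint64_t size  = end - start;

	/* new p_offset skips the hole before the real data */
	uint64_t p_offset = vmcore_off + (paddr - start);

	printf("p_offset = %#llx, next vmcore_off = %#llx\n",
	       (unsigned long long)p_offset,
	       (unsigned long long)(vmcore_off + size));
	return 0;
}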

Signed-off-by: HATAYAMA Daisuke <[email protected]>
---

 fs/proc/vmcore.c |   26 ++++++++++++++++++++------
 1 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 029bdc0..cd0f9d9 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -477,16 +477,23 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
        vmcore_off = elfsz + roundup(phdr_ptr->p_memsz, PAGE_SIZE);
 
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
+               u64 paddr, start, end, size;
+
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;
 
+               paddr = phdr_ptr->p_offset;
+               start = rounddown(paddr, PAGE_SIZE);
+               end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
+               size = end - start;
+
                /* Add this contiguous chunk of memory to vmcore list.*/
-               if (vmcore_add(vc_list, phdr_ptr->p_offset, phdr_ptr->p_memsz))
+               if (vmcore_add(vc_list, start, size))
                        return -ENOMEM;
 
                /* Update the program header offset. */
-               phdr_ptr->p_offset = vmcore_off;
-               vmcore_off = vmcore_off + phdr_ptr->p_memsz;
+               phdr_ptr->p_offset = vmcore_off + (paddr - start);
+               vmcore_off = vmcore_off + size;
        }
        return 0;
 }
@@ -507,16 +514,23 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
        vmcore_off = elfsz + roundup(phdr_ptr->p_memsz, PAGE_SIZE);
 
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
+               u64 paddr, start, end, size;
+
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;
 
+               paddr = phdr_ptr->p_offset;
+               start = rounddown(paddr, PAGE_SIZE);
+               end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
+               size = end - start;
+
                /* Add this contiguous chunk of memory to vmcore list.*/
-               if (vmcore_add(vc_list, phdr_ptr->p_offset, phdr_ptr->p_memsz))
+               if (vmcore_add(vc_list, start, size))
                        return -ENOMEM;
 
                /* Update the program header offset */
-               phdr_ptr->p_offset = vmcore_off;
-               vmcore_off = vmcore_off + phdr_ptr->p_memsz;
+               phdr_ptr->p_offset = vmcore_off + (paddr - start);
+               vmcore_off = vmcore_off + size;
        }
        return 0;
 }

