For some reason I only set it up for memory used by
ELF segments. This makes zero sense when viewed in the
cold, hard light of a new day.

Change-Id: I01ead9986f89b2a8697cbd5359e188a0ea66d883
Signed-off-by: Ronald G. Minnich <[email protected]>
---
 tests/vmm/vmrunkernel.c | 37 ++++++++++++++++++++-----------------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/tests/vmm/vmrunkernel.c b/tests/vmm/vmrunkernel.c
index 4009b66..7e5844a 100644
--- a/tests/vmm/vmrunkernel.c
+++ b/tests/vmm/vmrunkernel.c
@@ -328,7 +328,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 
 /* load_kernel loads an ELF file as a kernel. */
 uintptr_t
-load_kernel(char *filename, uintptr_t *kernstart, uintptr_t *kernend)
+load_kernel(char *filename)
 {
        Elf64_Ehdr *ehdr;
        Elf *elf;
@@ -395,10 +395,6 @@ load_kernel(char *filename, uintptr_t *kernstart, uintptr_t *kernend)
                        continue;
 
                pa = h->p_paddr;
-               if (*kernstart > pa)
-                       *kernstart = pa;
-               if (*kernend < pa + h->p_memsz)
-                       *kernend = pa + h->p_memsz;
                fprintf(stderr,
                        "Read header %d @offset %p to %p (elf PA is %p) %d 
bytes:",
                        i, h->p_offset, pa, h->p_paddr, h->p_filesz);
@@ -441,7 +437,6 @@ int main(int argc, char **argv)
        int vmmflags = 0; // Disabled probably forever. VMM_VMCALL_PRINTF;
        uint64_t entry = 0;
        int ret;
-       uintptr_t size;
        uint8_t csum;
        void *a_page;
        struct vm_trapframe *vm_tf;
@@ -453,7 +448,6 @@ int main(int argc, char **argv)
        struct stat stat_result;
        int num_read;
        int option_index;
-       uintptr_t kernstart = (uintptr_t)~1, kernend = 0;
        static struct option long_options[] = {
                {"debug",         no_argument,       0, 'd'},
                {"vmm_vmcall",    no_argument,       0, 'v'},
@@ -601,7 +595,7 @@ int main(int argc, char **argv)
                exit(1);
        }
 
-       entry = load_kernel(argv[0], &kernstart, &kernend);
+       entry = load_kernel(argv[0]);
        if (entry == 0) {
                fprintf(stderr, "Unable to load kernel %s\n", argv[0]);
                exit(1);
@@ -814,7 +808,9 @@ int main(int argc, char **argv)
         * PTEs with only one entry filled to point to a page of 1 GiB
         * PTEs; a page of 1 GiB PTEs with only one entry filled to
         * point to a page of 2 MiB PTEs; and a page of 2 MiB PTEs,
-        * only a subset of which will be filled. */
+        * all of which may be filled. For now, we don't handle
+        * starting addresses not aligned on 512 GiB boundaries or
+        * sizes > 1 GiB */
        ret = posix_memalign((void **)&p512, PGSIZE, 3 * PGSIZE);
        if (ret) {
                perror("ptp alloc");
@@ -823,7 +819,7 @@ int main(int argc, char **argv)
 
        /* Set up a 1:1 ("identity") page mapping from guest virtual
         * to guest physical using the (host virtual)
-        * `kerneladdress`. This mapping is used for only a short
+        * `kerneladdress`. This mapping may be used for only a short
         * time, until the guest sets up its own page tables. Be aware
         * that the values stored in the table are physical addresses.
         * This is subtle and mistakes are easily disguised due to the
@@ -832,13 +828,20 @@ int main(int argc, char **argv)
        p1 = &p512[NPTENTRIES];
        p2m = &p512[2 * NPTENTRIES];
 
-       size = kernend - kernstart;
-       fprintf(stderr, "Map %p for %zu bytes\n", kernstart, size);
-       p512[PML4(kernstart)] = (uint64_t)p1 | PTE_KERN_RW;
-       p1[PML3(kernstart)] = (uint64_t)p2m | PTE_KERN_RW;
-       for (uintptr_t i = 0; i < size; i += PML2_PTE_REACH) {
-               p2m[PML2(kernstart + i)] =
-                       (uint64_t)(kernstart + i) | PTE_KERN_RW | PTE_PS;
+       fprintf(stderr, "Map %p for %zu bytes\n", memstart, memsize);
+       /* TODO: fix this nested loop so it's correct for more than
+        * one GiB. */
+       for (uintptr_t p4 = memstart; p4 < memstart + memsize;
+           p4 += PML4_PTE_REACH) {
+               p512[PML4(p4)] = (uint64_t)p1 | PTE_KERN_RW;
+               for (uintptr_t p3 = p4; p3 < memstart + memsize;
+                    p3 += PML3_PTE_REACH) {
+                       p1[PML3(p3)] = (uint64_t)p2m | PTE_KERN_RW;
+                       for (uintptr_t p2 = p3; p2 < memstart + memsize; p2 += PML2_PTE_REACH) {
+                               p2m[PML2(p2)] =
+                                       (uint64_t)(p2) | PTE_KERN_RW | PTE_PS;
+                       }
+               }
        }
 
        fprintf(stderr, "p512 %p p512[0] is 0x%lx p1 %p p1[0] is 0x%x\n", p512, p512[0], p1, p1[0]);
-- 
2.8.0.rc3.226.g39d4020

-- 
You received this message because you are subscribed to the Google Groups 
"Akaros" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To post to this group, send email to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to