Hello, I can run a PAE guest in QEMU.

I'd like to know if this is the right way to implement PAE support.
I'm sending some code here, and I would really appreciate any feedback
you can give me.
This is not intended to be a patch yet; I still need to fix some issues:

   - I am assuming PAGE_OFFSET == 0xC0000000, so I can create the
kernel linear mapping in the launcher code (instead of doing it in
i386_head.S); a short sketch of the resulting top-level entries follows
this list.
   - There is no documentation, no error checking, no proper formatting, etc.
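
For reference, here is a minimal sketch (my reasoning, not code taken from
the patch below) of why top-level entry 3 is the PAGE_OFFSET entry under
that assumption: with PAE the top level is a four-entry PDPT and each entry
covers 1GB, so the index is simply the top two bits of the virtual address.

        /* Illustration only: PAGE_OFFSET == 0xC0000000 lands in PDPT entry 3. */
        unsigned int page_offset_index = 0xC0000000UL >> 30;   /* == 3 */

        /* Entry 0 (the identity view) and entry 3 (the kernel view at
         * PAGE_OFFSET) share the same PMD page; only the Present bit is set,
         * since a PAE top-level entry doesn't take the usual Writable flag. */
        pgdir[0] = ((u32)to_guest_phys(pmds)) | 0x1;
        pgdir[page_offset_index] = ((u32)to_guest_phys(pmds)) | 0x1;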


and I have some questions:

   - The hypercall arguments are 32 bits wide, but I need 64 bits for
PAE. It works for now only because I don't use high physical addresses
in these tests. The question here is: what is the best technique for
passing 64-bit data with hypercalls? (See the sketch after these
questions.)

   - Should the launcher for PAE be a different binary, or could it just
take a command-line option? (I'm using the option --pae; an example
invocation follows these questions.) The only difference is the initial
page table.
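
On the hypercall question, one option I have been considering (only a
sketch under my assumptions, not something this patch does) is to split
the 64-bit value across two of the existing 32-bit arguments and
reassemble it on the Host side:

        /* Guest side (hypothetical): pass the pte as low/high words.  This
         * uses up both remaining argument slots, so the addr argument that
         * LHCALL_SET_PTE currently carries no longer fits -- the hypercall
         * ABI would need a fourth argument, or the data could go through a
         * buffer in struct lguest_data instead. */
        lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd),
                   pteval.pte_low, pteval.pte_high);

        /* Host side (hypothetical), e.g. in do_hcall(): */
        u64 pteval = ((u64)args->arg3 << 32) | args->arg2;

On the launcher question, the invocation I have been testing with looks
like this (memory size, block image and kernel path are placeholders):

        ./lguest --pae --block=rootfile 128 vmlinux root=/dev/vda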



PS: I sent a 4MB-pages patch some days ago.  If you give me an OK,
I can adapt that patch to this one.


thank you in advance
Matias



diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 7228369..8965fec 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -90,6 +90,9 @@ static unsigned long guest_limit, guest_max;
 static int timeoutpipe[2];
 static unsigned int timeout_usec = 500;

+/* Are we going to set up PAE-compatible page tables for the Guest? */
+static bool pae = false;
+
 /* a per-cpu variable indicating whose vcpu is currently running */
 static unsigned int __thread cpu_id;

@@ -482,6 +485,50 @@ static unsigned long load_initrd(const char *name, unsigned long mem)
        return len;
 }

+static unsigned long setup_pagetables_pae(unsigned long mem,
+                                          unsigned long initrd_size)
+{
+       u64  *pgdir, *linear, *pmds;
+       unsigned int mapped_pages, i, linear_pages, j = 0;
+       unsigned int ptes_per_page = getpagesize()/sizeof(u64);
+
+       mapped_pages = mem/getpagesize();
+
+       /* Each PTE page can map ptes_per_page pages: how many do we need? */
+       linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page;
+
+       /* We put the top-level page table (the PDPT page) at the top of
+        * memory, just below the initrd. */
+       pgdir = from_guest_phys(mem) - initrd_size - getpagesize();
+
+       /* Now we use the next linear_pages pages as pte pages */
+       linear = (void *)pgdir - linear_pages*getpagesize();
+
+       /* And one page below those for the page middle directory (PMD)
+        * entries which will point at the pte pages. */
+       pmds = (void *)linear - getpagesize();
+
+       /* Linear mapping is easy: put every page's address into the mapping in
+        * order.  PAGE_PRESENT contains the flags Present, Writable and
+        * Executable. */
+       for (i = 0; i < mapped_pages; i++)
+               linear[i] = (u64)((i * getpagesize()) | PAGE_PRESENT);
+
+       /* The PMD entries point to the linear page table pages above, in
+        * order.  The top level (the four-entry PDPT) will then point at this
+        * PMD page. */
+       for (i = 0; i < mapped_pages; i += ptes_per_page, j++) {
+               pmds[j] = ((u32) to_guest_phys(linear + i)) | PAGE_PRESENT;
+       }
+
+       pgdir[0] = (((u32)to_guest_phys(pmds)) | 0x1);
+       pgdir[3] = (((u32)to_guest_phys(pmds)) | 0x1);
+
+       verbose("Linear mapping of %u pages in %u pte pages at %p\n",
+               mapped_pages, linear_pages, linear);
+
+       /* We return the top level (guest-physical) address: the kernel needs
+        * to know where it is. */
+       return to_guest_phys(pgdir);
+}
+
 /* Once we know how much memory we have we can construct simple linear page
  * tables which set virtual == physical which will get the Guest far enough
  * into the boot to create its own.
@@ -1926,13 +1973,14 @@ static struct option opts[] = {
        { "block", 1, NULL, 'b' },
        { "rng", 0, NULL, 'r' },
        { "initrd", 1, NULL, 'i' },
+       { "pae", 0, NULL, 'p' },
        { NULL },
 };
 static void usage(void)
 {
        errx(1, "Usage: lguest [--verbose] "
             "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n"
-            "|--block=<filename>|--initrd=<filename>]...\n"
+            "|--block=<filename>|--initrd=<filename>] [--pae]\n"
             "<mem-in-mb> vmlinux [args...]");
 }

@@ -2005,6 +2053,9 @@ int main(int argc, char *argv[])
                case 'i':
                        initrd_name = optarg;
                        break;
+               case 'p':
+                       pae = true;
+                       break;
                default:
                        warnx("Unknown argument %s", argv[optind]);
                        usage();
@@ -2041,7 +2092,10 @@ int main(int argc, char *argv[])
        }

        /* Set up the initial linear pagetables, starting below the initrd. */
-       pgdir = setup_pagetables(mem, initrd_size);
+       if (pae)
+               pgdir = setup_pagetables_pae(mem, initrd_size);
+       else
+               pgdir = setup_pagetables(mem, initrd_size);

        /* The Linux boot header contains an "E820" memory map: ours is a
         * simple, single region. */
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index c70e12b..a108c09 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,7 +2,6 @@ config LGUEST_GUEST
        bool "Lguest guest support"
        select PARAVIRT
        depends on X86_32
-       depends on !X86_PAE
        depends on !X86_VOYAGER
        select VIRTIO
        select VIRTIO_RING
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index d9249a8..43d7384 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -342,6 +342,9 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
                 * flush_tlb_user() for both user and kernel mappings unless
                 * the Page Global Enable (PGE) feature bit is set. */
                *dx |= 0x00002000;
+#ifdef CONFIG_X86_PAE
+               *dx |= 0x00000040;      /* Tell the Guest PAE is available. */
+#endif
                break;
        case 0x80000000:
                /* Futureproof this a little: if they ask how much extended
@@ -481,15 +484,33 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
        lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
 }

+#ifdef CONFIG_X86_PAE
 /* The Guest calls this to set a top-level entry.  Again, we set the entry then
  * tell the Host which top-level page we changed, and the index of the entry we
  * changed. */
+static void lguest_set_pud(pud_t *pudp, pud_t pudval)
+{
+       *pudp = pudval;
+       /* arg1 is the 32-byte-aligned pdpt address, arg2 the entry index. */
+       lazy_hcall(LHCALL_SET_PUD, __pa(pudp) & 0xFFFFFFE0,
+                  (__pa(pudp) & 0x1F) / 8, 0);
+}
+
+/* The Guest calls this to set a PMD entry. */
+static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       *pmdp = pmdval;
+       /* The Host doesn't use these index arguments yet. */
+       lazy_hcall(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
+                  (__pa(pmdp) & (PAGE_SIZE-1)) / 8, 0);
+}
+
+#else
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
        *pmdp = pmdval;
        lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
                   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
 }
+#endif

 /* There are a couple of legacy places where the kernel sets a PTE, but we
  * don't know the top level any more.  This is useless for us, since we don't
@@ -501,12 +522,57 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * anything changed until we've done the first page table switch. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
+#ifdef CONFIG_X86_PAE
+        ptep->pte_high = pteval.pte_high;
+        smp_wmb();
+        ptep->pte_low = pteval.pte_low;
+#else
        *ptep = pteval;
+#endif
+
+       /* Don't bother with hypercall before initial setup. */
+       if (current_cr3)
+               lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+#ifdef CONFIG_X86_PAE
+static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+       set_64bit((u64 *)ptep, pte.pte);
+
        /* Don't bother with hypercall before initial setup. */
        if (current_cr3)
                lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }

+static inline void lguest_set_pte_present(struct mm_struct *mm,
+                                          unsigned long addr,
+                                          pte_t *ptep, pte_t pte)
+{
+        ptep->pte_low = 0;
+        smp_wmb();
+        ptep->pte_high = pte.pte_high;
+        smp_wmb();
+        ptep->pte_low = pte.pte_low;
+
+       lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pte.pte_low);
+}
+
+static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t *ptep)
+{
+        ptep->pte_low = 0;
+        smp_wmb();
+        ptep->pte_high = 0;
+
+       /* flush_tlb_single... is this ok? */
+       lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+}
+
+static void lguest_pmd_clear(pmd_t *pmdp)
+{
+        lguest_set_pmd(pmdp, __pmd(0));
+}
+#endif
+
 /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
  * native page table operations.  On native hardware you can set a new page
  * table entry whenever you want, but if you want to remove one you have to do
@@ -983,6 +1049,14 @@ __init void lguest_init(void)
        pv_mmu_ops.set_pte = lguest_set_pte;
        pv_mmu_ops.set_pte_at = lguest_set_pte_at;
        pv_mmu_ops.set_pmd = lguest_set_pmd;
+
+#ifdef CONFIG_X86_PAE
+       pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
+       pv_mmu_ops.set_pte_present = lguest_set_pte_present;
+       pv_mmu_ops.pte_clear = lguest_pte_clear;
+       pv_mmu_ops.pmd_clear = lguest_pmd_clear;
+       pv_mmu_ops.set_pud = lguest_set_pud;
+#endif
        pv_mmu_ops.read_cr2 = lguest_read_cr2;
        pv_mmu_ops.read_cr3 = lguest_read_cr3;
        pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 5c7cef3..519dc58 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -35,6 +35,10 @@ ENTRY(lguest_entry)
         * about to do. */
        movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi

+/* FIXME: For PAE, I assume PAGE_OFFSET == 0xC0000000 so I can create the
+ * kernel linear mapping in the launcher. */
+
+#ifndef CONFIG_X86_PAE
        /* Copy first 32 entries of page directory to __PAGE_OFFSET entries.
         * This means the first 128M of kernel memory will be mapped at
         * PAGE_OFFSET where the kernel expects to run.  This will get it far
@@ -44,6 +48,7 @@ ENTRY(lguest_entry)
        addl $((__PAGE_OFFSET >> 22) * 4), %edi
        rep
        movsl
+#endif

        /* Set up the initial stack so we can run C code. */
        movl $(init_thread_union+THREAD_SIZE),%esp
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 76f2b36..5d491bb 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
 config LGUEST
        tristate "Linux hypervisor example code"
-       depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
+       depends on X86_32 && EXPERIMENTAL && FUTEX && !X86_VOYAGER
        select HVC_DRIVER
        ---help---
          This is a very simple module which allows you to run
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 54d66f0..c5d6678 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -78,6 +78,11 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
        case LHCALL_SET_PMD:
                guest_set_pmd(cpu->lg, args->arg1, args->arg2);
                break;
+#ifdef CONFIG_X86_PAE
+       case LHCALL_SET_PUD:
+               guest_set_pud(cpu->lg, args->arg1, args->arg2);
+               break;
+#endif
        case LHCALL_SET_CLOCKEVENT:
                guest_set_clockevent(cpu, args->arg1);
                break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 5faefea..b1fae77 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -18,7 +18,7 @@ int init_pagetables(struct page **switcher_page, unsigned int pages);

 struct pgdir
 {
-       unsigned long gpgdir;
+       pgd_t *gpgdir;
        pgd_t *pgdir;
 };

@@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * in the kernel. */
 #define pgd_flags(x)   (pgd_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)     (pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x)    (pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x)     (pmd_val(x) >> PAGE_SHIFT)

 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
@@ -168,6 +170,9 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#ifdef CONFIG_X86_PAE
+void guest_set_pud(struct lguest *lg, unsigned long gpgdir, u32 i);
+#endif
 void guest_pagetable_clear_all(struct lg_cpu *cpu);
 void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 81d0c60..21af8e3 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -52,6 +52,14 @@
  * page.  */
 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

+/* For PAE we need the PMD index as well.  Two PTE pages are needed to map
+ * 4MB of RAM, so mapping the top 4MB would take the last two PMD entries.
+ * Since we can get away with only the top 2MB (SWITCHER_ADDR moves to -2MB
+ * for PAE), we just need the last PMD entry of the last PMD page. */
+#ifdef CONFIG_X86_PAE
+#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
+#endif
+
 /* We actually need a separate PTE page for each CPU.  Remember that after the
  * Switcher code itself comes two pages for each CPU, and we don't want this
  * CPU's guest to see the pages of any other CPU. */
@@ -72,39 +80,87 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
 {
        unsigned int index = pgd_index(vaddr);

+#ifndef CONFIG_X86_PAE
        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(cpu, "attempt to access switcher pages");
                index = 0;
        }
+#endif
        /* Return a pointer index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
 }

+#ifdef CONFIG_X86_PAE
+/* This routine then takes the PGD entry given above, which contains the
+ * address of the PMD page.  It then returns a pointer to the PMD entry for the
+ * given address. */
+static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
+{
+       unsigned int index = pmd_index(vaddr);
+        pmd_t *page;
+
+       /* We kill any Guest trying to touch the Switcher addresses. */
+       if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
+           index >= SWITCHER_PMD_INDEX) {
+               kill_guest(cpu, "attempt to access switcher pages");
+               index = 0;
+       }
+
+        /* You should never call this if the PGD entry wasn't valid */
+        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+
+       page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+        return &page[index];
+}
+#endif
+
 /* This routine then takes the page directory entry returned above, which
  * contains the address of the page table entry (PTE) page.  It then returns a
  * pointer to the PTE entry for the given address. */
-static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
+static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
 {
+#ifdef CONFIG_X86_PAE
+        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
+        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
+
+       /* You should never call this if the PMD entry wasn't valid */
+       BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
+#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-       return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+#endif
+       return &page[pte_index(vaddr)];
 }

 /* These two functions just like the above two, except they access the Guest
  * page tables.  Hence they return a Guest address. */
-static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
+static pgd_t *gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
 {
        unsigned int index = vaddr >> (PGDIR_SHIFT);
-       return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
+       return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index;
+}
+
+#ifdef CONFIG_X86_PAE
+static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
+{
+       unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+       BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+       return gpage + pmd_index(vaddr) * sizeof(pmd_t);
 }
+#endif

-static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
+static unsigned long gpte_addr(struct lg_cpu *cpu, pgd_t gpgd,
+                              unsigned long vaddr)
 {
+#ifdef CONFIG_X86_PAE
+       pmd_t gpmd = lgread(cpu, (unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
+#else
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-       return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+#endif
+       return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
 /*:*/

@@ -183,11 +239,24 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)

 static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
+#ifdef CONFIG_X86_PAE
+       if ((pgd_flags(gpgd) & ~_PAGE_PRESENT) ||
+#else
        if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
+#endif
           (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
                kill_guest(cpu, "bad page directory entry");
 }

+#ifdef CONFIG_X86_PAE
+static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+{
+       if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
+          (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+               kill_guest(cpu, "bad page middle directory entry");
+}
+#endif
+
 /*H:330
  * (i) Looking up a page table entry when the Guest faults.
  *
@@ -198,6 +267,7 @@ static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true.  Otherwise, it was a real fault and we need to tell the Guest. */
+
 int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 {
        pgd_t gpgd;
@@ -206,14 +276,21 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
        pte_t gpte;
        pte_t *spte;

+#ifdef CONFIG_X86_PAE
+       pmd_t *spmd;
+       pmd_t gpmd;
+#endif
+
        /* First step: get the top-level Guest page table entry. */
-       gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+       gpgd = lgread(cpu, (unsigned long) gpgd_addr(cpu, vaddr), pgd_t);
+
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                return 0;

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
+
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
@@ -225,14 +302,45 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(cpu, gpgd);
+
                /* And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated. */
                *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
        }

+#ifdef CONFIG_X86_PAE
+       gpmd = lgread(cpu, (unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+
+       /* middle level not present?  We can't map it in. */
+        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+                return 0;
+
+        /* Now look at the matching shadow entry. */
+        spmd = spmd_addr(cpu, *spgd, vaddr);
+
+        if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
+                /* No shadow entry: allocate a new shadow PTE page. */
+                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+
+                /* This is not really the Guest's fault, but killing it is
+                 * simple for this corner case. */
+                if (!ptepage) {
+                        kill_guest(cpu, "out of memory allocating pte page");
+                        return 0;
+                }
+
+                /* We check that the Guest pmd is OK. */
+                check_gpmd(cpu, gpmd);
+
+                /* And we copy the flags to the shadow PMD entry.  The page
+                 * number in the shadow PMD is the page we just allocated. */
+                *spmd = __pmd(__pa(ptepage) | pmd_flags(gpmd));
+        }
+#endif
+
        /* OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later. */
-       gpte_ptr = gpte_addr(gpgd, vaddr);
+       gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
        gpte = lgread(cpu, gpte_ptr, pte_t);

        /* If this page isn't in the Guest page tables, we can't page it in. */
@@ -258,7 +366,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
-       spte = spte_addr(*spgd, vaddr);
+       spte = spte_addr(cpu, *spgd, vaddr);
        /* If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry. */
        release_pte(*spte);
@@ -299,15 +407,24 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
        pgd_t *spgd;
        unsigned long flags;
+#ifdef CONFIG_X86_PAE
+       pmd_t *spmd;
+#endif

        /* Look at the current top level entry: is it present? */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return 0;

+#ifdef CONFIG_X86_PAE
+       spmd = spmd_addr(cpu, *spgd, vaddr);
+       if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
+               return 0;
+#endif
+
        /* Check the flags on the pte entry itself: it must be present and
         * writable. */
-       flags = pte_flags(*(spte_addr(*spgd, vaddr)));
+       flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 }
@@ -321,8 +438,49 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
                kill_guest(cpu, "bad stack page %#lx", vaddr);
 }

+#ifdef CONFIG_X86_PAE
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pmd(pmd_t *spmd)
+{
+        /* If the entry's not present, there's nothing to release. */
+        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+                unsigned int i;
+                /* Converting the pfn to find the actual PTE page is easy: turn
+                 * the page number into a physical address, then convert to a
+                 * virtual address (easy for kernel pages like this one). */
+                pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
+                /* For each entry in the page, we might need to release it. */
+                for (i = 0; i < PTRS_PER_PTE; i++)
+                        release_pte(ptepage[i]);
+                /* Now we can free the page of PTEs */
+                free_page((long)ptepage);
+                /* And zero out the PMD entry so we never release it twice. */
+                *spmd = __pmd(0);
+        }
+}
+
+/*H:450 If we chase down the release_pgd() code, it looks like this: */
+static void release_pgd(pgd_t *spgd)
+{
+        /* If the entry's not present, there's nothing to release. */
+        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+                unsigned int i;
+                pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+                for (i = 0; i < PTRS_PER_PMD; i++){
+                        release_pmd(&pmdpage[i]);
+                }
+                /* Now we can free the page of PMD entries */
+                free_page((long)pmdpage);
+                /* And zero out the PGD entry so we never release it twice. */
+                *spgd = __pgd(0);
+        }
+}
+
+#else /* !CONFIG_X86_PAE */
+
+/*H:450 If we chase down the release_pgd() code, it looks like this: */
+static void release_pgd(pgd_t *spgd)
 {
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -341,6 +499,8 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd)
        }
 }

+#endif
+
 /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
  * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
  * It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -349,7 +509,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
-               release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+               release_pgd(lg->pgdirs[idx].pgdir + i);
 }

 /*H:440 (v) Flushing (throwing away) page tables,
@@ -369,23 +529,34 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
        pgd_t gpgd;
        pte_t gpte;

+#ifdef CONFIG_X86_PAE
+       pmd_t gpmd;
+#endif
+
        /* First step: get the top-level Guest page table entry. */
-       gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+       gpgd = lgread(cpu, (unsigned long) gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

-       gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
+#ifdef CONFIG_X86_PAE
+       gpmd = lgread(cpu, (unsigned long) gpmd_addr(gpgd, vaddr), pmd_t);
+       if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+               kill_guest(cpu, "Bad address %#lx", vaddr);
+#endif
+
+       gpte = lgread(cpu, (unsigned long) gpte_addr(cpu, gpgd, vaddr), pte_t);
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 }

+
 /* We keep several page tables.  This is a simple routine to find the page
  * table (if any) corresponding to this top-level address the Guest has given
  * us. */
-static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+static unsigned int find_pgdir(struct lguest *lg, pgd_t *pgtable)
 {
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
@@ -398,11 +569,13 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
  * allocate a new one (and so the kernel parts are not there), we set
  * blank_pgdir. */
 static unsigned int new_pgdir(struct lg_cpu *cpu,
-                             unsigned long gpgdir,
+                             pgd_t *gpgdir,
                              int *blank_pgdir)
 {
        unsigned int next;
-
+#ifdef CONFIG_X86_PAE
+       pmd_t *pmd_table;
+#endif
        /* We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy. */
        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
@@ -413,11 +586,21 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
                /* If the allocation fails, just keep using the one we have */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
-               else
+               else {
                        /* This is a blank page, so there are no kernel
                         * mappings: caller must map the stack! */
                        *blank_pgdir = 1;
+
+#ifdef CONFIG_X86_PAE
+                       /* In PAE mode, allocate a pmd page and populate
+                        * the last pgd entry. */
+                       pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+                       /* FIXME: handle allocation failure here. */
+                       set_pgd(cpu->lg->pgdirs[next].pgdir +
+                                       SWITCHER_PGD_INDEX,
+                               __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+#endif
+               }
        }
+
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
@@ -436,11 +619,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
-       newpgdir = find_pgdir(cpu->lg, pgtable);
+       newpgdir = find_pgdir(cpu->lg, (pgd_t *)pgtable);
        /* If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1. */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
-               newpgdir = new_pgdir(cpu, pgtable, &repin);
+               newpgdir = new_pgdir(cpu, (pgd_t *)pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
@@ -455,12 +638,28 @@ static void release_all_pagetables(struct lguest *lg)
 {
        unsigned int i, j;

+#ifdef CONFIG_X86_PAE
+       pgd_t *spgd;
+       pmd_t *pmdpage;
+#endif
+
        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-               if (lg->pgdirs[i].pgdir)
+               if (lg->pgdirs[i].pgdir) {
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-                               release_pgd(lg, lg->pgdirs[i].pgdir + j);
+                               release_pgd(lg->pgdirs[i].pgdir + j);
+#ifdef CONFIG_X86_PAE
+                       /* Get the last pmd page. */
+                       spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
+                       pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+                       /* And release the pmd entries of that pmd page,
+                        * except for the Switcher pmd. */
+                       for (j = 0; j < SWITCHER_PMD_INDEX; j++)
+                               release_pmd(&pmdpage[j]);
+#endif
+               }
 }

 /* We also throw away everything when a Guest tells us it's changed a kernel
@@ -502,23 +701,36 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

+#ifdef CONFIG_X86_PAE
+       pmd_t *spmd;
+#endif
+
        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
-               /* Otherwise, we start by releasing the existing entry. */
-               pte_t *spte = spte_addr(*spgd, vaddr);
-               release_pte(*spte);
-
-               /* If they're setting this entry as dirty or accessed, we might
-                * as well put that entry they've given us in now.  This shaves
-                * 10% off a copy-on-write micro-benchmark. */
-               if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-                       check_gpte(cpu, gpte);
-                       *spte = gpte_to_spte(cpu, gpte,
-                                            pte_flags(gpte) & _PAGE_DIRTY);
-               } else
-                       /* Otherwise kill it and we can demand_page() it in
-                        * later. */
-                       *spte = __pte(0);
+
+#ifdef CONFIG_X86_PAE
+               spmd = spmd_addr(cpu, *spgd, vaddr);
+               if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+#endif
+
+                       /* Otherwise, we start by releasing the existing
+                        * entry. */
+                       pte_t *spte = spte_addr(cpu, *spgd, vaddr);
+                       release_pte(*spte);
+
+                       /* If they're setting this entry as dirty or accessed,
+                        * we might as well put that entry they've given us in
+                        * now.  This shaves 10% off a copy-on-write
+                        * micro-benchmark. */
+                       if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+                               check_gpte(cpu, gpte);
+                               *spte = gpte_to_spte(cpu, gpte,
+                                                    pte_flags(gpte) &
+                                                    _PAGE_DIRTY);
+                       } else
+                               /* Otherwise kill it and we can demand_page()
+                                * it in later. */
+                               *spte = __pte(0);
+#ifdef CONFIG_X86_PAE
+               }
+#endif
        }
 }

@@ -544,7 +756,7 @@ void guest_set_pte(struct lg_cpu *cpu,
                                do_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
-               int pgdir = find_pgdir(cpu->lg, gpgdir);
+               int pgdir = find_pgdir(cpu->lg, (pgd_t *)gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(cpu, pgdir, vaddr, gpte);
@@ -565,9 +777,33 @@ void guest_set_pte(struct lg_cpu *cpu,
  *
  * So with that in mind here's our code to to update a (top-level) PGD entry:
  */
-void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
+
+#ifdef CONFIG_X86_PAE
+void guest_set_pud(struct lguest *lg, unsigned long pudp, u32 idx)
+{
+       int pgdir;
+       pgd_t *gpgdir = (pgd_t *) pudp;
+
+       /* If they're talking about a page table we have a shadow for... */
+       pgdir = find_pgdir(lg, gpgdir);
+       if (pgdir < ARRAY_SIZE(lg->pgdirs))
+               /* ... throw it away. */
+               release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+}
+
+/* We don't use the arguments here yet...
+ * I need to fix this; how can we improve it? */
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
+{
+       release_all_pagetables(lg);
+}
+
+#else /*!CONFIG_X86_PAE*/
+
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
 {
        int pgdir;
+       pgd_t *gpgdir = (pgd_t *) pmdp;

        /* The kernel seems to try to initialize this early on: we ignore its
         * attempts to map over the Switcher. */
@@ -578,8 +814,9 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
-               release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+               release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 }
+#endif

 /*H:500 (vii) Setting up the page tables initially.
  *
@@ -587,12 +824,26 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
  * its first page table is.  We set some things up here: */
 int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 {
+#ifdef CONFIG_X86_PAE
+       pgd_t *pgd;
+       pmd_t *pmd_table;
+#endif
        /* We start on the first shadow page table, and give it a blank PGD
         * page. */
-       lg->pgdirs[0].gpgdir = pgtable;
+       lg->pgdirs[0].gpgdir = (pgd_t *) pgtable;
        lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[0].pgdir)
                return -ENOMEM;
+
+#ifdef CONFIG_X86_PAE
+       pgd = lg->pgdirs[0].pgdir;
+       pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+       if (!pmd_table)
+               return -ENOMEM;
+
+       set_pgd(pgd + SWITCHER_PGD_INDEX,
+               __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+#endif
+
        lg->cpus[0].cpu_pgd = 0;
        return 0;
 }
@@ -600,21 +851,33 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
 void page_table_guest_data_init(struct lg_cpu *cpu)
 {
+#ifdef CONFIG_X86_PAE
+       const unsigned long reserve_mb = 2;
+#else
+       const unsigned long reserve_mb = 4;
+#endif
+
        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
-           /* We tell the Guest that it can't use the top 4MB of virtual
+           /* We tell the Guest that it can't use the top 2 or 4 MB of virtual
             * addresses used by the Switcher. */
-           || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
-           || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
+           || put_user(reserve_mb * 1024 * 1024,
+                       &cpu->lg->lguest_data->reserve_mem)
+           || put_user((unsigned long)cpu->lg->pgdirs[0].gpgdir,
+                       &cpu->lg->lguest_data->pgdir))
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

        /* In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
         * Switcher mappings, so check that now. */
+#ifdef CONFIG_X86_PAE
+       if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
+           pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
+               kill_guest(cpu, "bad kernel address %#lx",
+                                cpu->lg->kernel_address);
+#else
        if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
                kill_guest(cpu, "bad kernel address %#lx",
                                 cpu->lg->kernel_address);
+#endif
 }

 /* When a Guest dies, our cleanup is fairly simple. */
@@ -638,15 +901,25 @@ void free_guest_pagetable(struct lguest *lg)
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
        pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
-       pgd_t switcher_pgd;
        pte_t regs_pte;
        unsigned long pfn;

+#ifdef CONFIG_X86_PAE
+       pmd_t switcher_pmd;
+       pmd_t *pmd_table;
+
+       /* Make the last PMD entry of the last PMD page point to the Switcher's
+        * PTE page for this CPU (with appropriate flags). */
+       switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
+                              __pgprot(__PAGE_KERNEL));
+       pmd_table = __va(pgd_pfn(
+                       cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
+                       << PAGE_SHIFT);
+       pmd_table[SWITCHER_PMD_INDEX] = switcher_pmd;
+
+#else
+       pgd_t switcher_pgd;
+
        /* Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags). */
        switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);
-
        cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+#endif

        /* We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
@@ -657,7 +930,7 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
         * again. */
        pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
        regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
-       switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+       switcher_pte_page[(unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE] =
+               regs_pte;
 }
 /*:*/

@@ -666,7 +939,7 @@ static void free_switcher_pte_pages(void)
        unsigned int i;

        for_each_possible_cpu(i)
-               free_page((long)switcher_pte_page(i));
+       free_page((long)switcher_pte_page(i));
 }

 /*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index be4a724..72298fa 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -17,8 +17,13 @@
 /* Pages for switcher itself, then two pages per cpu */
 #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)

-/* We map at -4M for ease of mapping into the guest (one PTE page). */
+#ifdef CONFIG_X86_PAE
+/* We map at -2M for ease of mapping into the guest (one PTE page). */
+#define SWITCHER_ADDR 0xFFE00000
+#else
+/* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
+#endif

 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index a3241f2..d604c5a 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -17,6 +17,7 @@
 #define LHCALL_SET_PMD         15
 #define LHCALL_LOAD_TLS                16
 #define LHCALL_NOTIFY          17
+#define LHCALL_SET_PUD         18

 #define LGUEST_TRAP_ENTRY 0x1F