This patch adds support for memory management on 32-bit systems without PAE.
Signed-off-by: Alexander Graf <ag...@suse.de> --- pc-bios/xenner/xenner-mm32.c | 314 ++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 314 insertions(+), 0 deletions(-) create mode 100644 pc-bios/xenner/xenner-mm32.c diff --git a/pc-bios/xenner/xenner-mm32.c b/pc-bios/xenner/xenner-mm32.c new file mode 100644 index 0000000..7622ae5 --- /dev/null +++ b/pc-bios/xenner/xenner-mm32.c @@ -0,0 +1,314 @@ +/* + * Copyright (C) Red Hat 2007 + * Copyright (C) Novell Inc. 2010 + * + * Author(s): Gerd Hoffmann <kra...@redhat.com> + * Alexander Graf <ag...@suse.de> + * + * Xenner memory management for 32 bit normal mode + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <inttypes.h> +#include <xen/xen.h> + +#include "xenner.h" +#include "xenner-mm.c" + +/* --------------------------------------------------------------------- */ + +#define MAPS_R_BITS 4 +#define MAPS_R_COUNT (1 << MAPS_R_BITS) +#define MAPS_R_MASK (MAPS_R_COUNT - 1) +#define MAPS_R_SIZE (MAPS_MAX / MAPS_R_COUNT) +#define MAPS_R_LOW(r) (MAPS_R_SIZE * (r)) +#define MAPS_R_HIGH(r) (MAPS_R_SIZE * (r) + MAPS_R_SIZE) +static int maps_next[MAPS_R_COUNT]; + +static spinlock_t maplock = SPIN_LOCK_UNLOCKED; + +/* --------------------------------------------------------------------- */ + +uintptr_t emu_pa(uintptr_t va) +{ + switch(va & 0xfff00000) { + case XEN_TXT: + return va - (uintptr_t)_vstart; + case XEN_IPT: + { + uintptr_t mfn_guest = emudev_get(EMUDEV_CONF_GUEST_START_PFN, 0); + uintptr_t init_pt_pfn = emudev_get(EMUDEV_CONF_PFN_INIT_PT, 0); + + return frame_to_addr(mfn_guest + init_pt_pfn) | (va - XEN_IPT); + } + case XEN_M2P: + return va - XEN_M2P + frame_to_addr(vmconf.mfn_m2p); + } + + panic("unknown address", NULL); + return 0; +} + +/* --------------------------------------------------------------------- */ + +static int find_slot(int range) +{ + int low = MAPS_R_LOW(range); + int high = MAPS_R_HIGH(range); + int *next = maps_next + range; + int start = *next; + int slot; + + while (maps_refcnt[*next]) { + (*next)++; + if (*next == high) { + *next = low; + } + if (*next == start) { + return -1; + } + } + slot = *next; + (*next)++; + if (*next == high) { + *next = low; + } + return slot; +} + +static int mfn_to_slot_32(uint32_t mfn, int range) +{ + int low = MAPS_R_LOW(range); + int high = MAPS_R_HIGH(range); + int slot; + + for (slot = low; slot < high; slot++) { + if (!test_pgflag_32(maps_32[slot], _PAGE_PRESENT)) { + continue; + } + if (get_pgframe_32(maps_32[slot]) == mfn) { + /* cache hit */ + return slot; + } + } + return -1; +} + +void *map_page(unsigned long maddr) +{ + uint32_t mfn = addr_to_frame(maddr); + uint32_t off = 
addr_offset(maddr); + uint32_t va; + int range, slot; + + spin_lock(&maplock); + range = mfn & MAPS_R_MASK; + slot = mfn_to_slot_32(mfn, range); + if (slot == -1) { + slot = find_slot(range); + if (slot == -1) { + panic("out of map slots", NULL); + } + printk(3, "%s: mfn %5x range %d [%3d - %3d], slot %3d\n", __FUNCTION__, + mfn, range, MAPS_R_LOW(range), MAPS_R_HIGH(range), slot); + maps_32[slot] = get_pgentry_32(mfn, EMU_PGFLAGS); + vminfo.faults[XEN_FAULT_MAPS_MAPIT]++; + va = XEN_MAP_32 + slot*PAGE_SIZE; + flush_tlb_addr(va); + } else { + printk(3, "%s: mfn %5x range %d [%3d - %3d], slot %3d (cached)\n", + __FUNCTION__, mfn, range, MAPS_R_LOW(range), MAPS_R_HIGH(range), + slot); + vminfo.faults[XEN_FAULT_MAPS_REUSE]++; + va = XEN_MAP_32 + slot*PAGE_SIZE; + } + maps_refcnt[slot]++; + spin_unlock(&maplock); + + return (void*)va + off; +} + +void free_page(void *ptr) +{ + uintptr_t va = ((uintptr_t)ptr) & PAGE_MASK; + uintptr_t base = XEN_MAP_32; + int slot = (va - base) >> PAGE_SHIFT; + + spin_lock(&maplock); + maps_refcnt[slot]--; + spin_unlock(&maplock); +} + +void *fixmap_page(struct xen_cpu *cpu, unsigned long maddr) +{ + static int fixmap_slot = MAPS_MAX; + uint32_t mfn = addr_to_frame(maddr); + uint32_t off = addr_offset(maddr); + uint32_t va; + int slot; + + slot = fixmap_slot++; + printk(2, "%s: mfn %5x slot %3d\n", __FUNCTION__, mfn, slot); + maps_32[slot] = get_pgentry_32(mfn, EMU_PGFLAGS); + va = XEN_MAP_32 + slot*PAGE_SIZE; + return (void*)va + off; +} + +/* --------------------------------------------------------------------- */ + +pte_t *find_pte_lpt(uint32_t va) +{ + pte_t *lpt_base = (void*)XEN_LPT_32; + pte_t offset = va >> PAGE_SHIFT; + + return lpt_base + offset; +} + +pte_t *find_pte_map(struct xen_cpu *cpu, uint32_t va) +{ + pte_t *pgd; + pte_t *pte; + int g,t; + + g = PGD_INDEX_32(va); + t = PTE_INDEX_32(va); + printk(5, "va %" PRIx32 " | 32 %d -> %d\n", va, g, t); + + pgd = map_page(frame_to_addr(read_cr3_mfn(cpu))); + printk(5, " pgd %3d = 
%08" PRIx32 "\n", g, pgd[g]); + if (!test_pgflag_32(pgd[g], _PAGE_PRESENT)) { + return NULL; + } + + pte = map_page(frame_to_addr(get_pgframe_32(pgd[g]))); + printk(5, " pte %3d = %08" PRIx32 "\n", t, pte[t]); + free_page(pgd); + + return pte+t; +} + +void pgtable_walk(struct xen_cpu *cpu, uint32_t va) +{ + pte_t *p; + p = find_pte_map(cpu, va); + free_page(p); +} + +/* --------------------------------------------------------------------- */ + +static inline pte_t *find_pgd(unsigned long va, uint64_t mfn, int alloc) +{ + pte_t *pgd = map_page(frame_to_addr(mfn)); + pte_t *pte; + + pgd += PGD_INDEX_32(va); + if (!test_pgflag(*pgd, _PAGE_PRESENT)) { + pte = get_pages(1, "pte"); + *pgd = get_pgentry(EMU_MFN(pte), _PAGE_PRESENT); + } + + return pgd; +} + +static inline pte_t *find_pte(unsigned long va, uint64_t mfn) +{ + pte_t *pte = map_page(frame_to_addr(mfn)); + return pte + PTE_INDEX_32(va); +} + +static void map_one_page(struct xen_cpu *cpu, unsigned long va, uint64_t maddr, + int flags) +{ + uint64_t mfn = addr_to_frame(maddr); + pte_t *pgd; + pte_t *pte; + + pgd = find_pgd(va, read_cr3_mfn(cpu), 1); + pte = find_pte(va, get_pgframe(*pgd)); + *pte = get_pgentry(mfn, flags); + + free_page(pte); + free_page(pgd); +} + +void map_region(struct xen_cpu *cpu, uint64_t va, uint32_t flags, + uint64_t start, uint64_t count) +{ + uint64_t maddr = frame_to_addr(start); + uint64_t maddr_end = maddr + frame_to_addr(count); + + for (; maddr < maddr_end; maddr += PAGE_SIZE, va += PAGE_SIZE) { + map_one_page(cpu, va, maddr, flags); + } +} + +/* --------------------------------------------------------------------- */ + +void update_emu_mappings(uint32_t cr3_mfn) +{ + uint32_t *new_pgd; + uint32_t entry; + int idx; + + new_pgd = map_page(frame_to_addr(cr3_mfn)); + + idx = PGD_INDEX_32(XEN_M2P_32); + if (!test_pgflag_32(new_pgd[idx], _PAGE_PRESENT)) { + /* new one, must init static mappings */ + for (; idx < PGD_COUNT_32; idx++) { + if (!test_pgflag_32(emu_pgd_32[idx], 
_PAGE_PRESENT)) { + continue; + } + if (idx == PGD_INDEX_32(XEN_LPT_32)) { + continue; + } + + new_pgd[idx] = emu_pgd_32[idx]; + idx++; + } + } + + /* linear pgtable mapping */ + idx = PGD_INDEX_32(XEN_LPT_32); + entry = get_pgentry_32(cr3_mfn, LPT_PGFLAGS); + if (new_pgd[idx] != entry) { + new_pgd[idx] = entry; + } + + free_page(new_pgd); +} + +/* --------------------------------------------------------------------- */ + +void paging_init(struct xen_cpu *cpu) +{ + uintptr_t mfn_guest = emudev_get(EMUDEV_CONF_GUEST_START_PFN, 0); + uintptr_t init_pt_pfn = emudev_get(EMUDEV_CONF_PFN_INIT_PT, 0); + int idx; + + idx = PGD_INDEX_32(XEN_TXT_32); + emu_pgd_32[idx] = get_pgentry_32(vmconf.mfn_emu, EMU_PGFLAGS | _PAGE_PSE); + + idx = PGD_INDEX_32(XEN_M2P_32); + emu_pgd_32[idx] = get_pgentry_32(vmconf.mfn_m2p, M2P_PGFLAGS_32 | _PAGE_PSE); + + idx = PGD_INDEX_32(XEN_MAP_32); + emu_pgd_32[idx] = get_pgentry_32(EMU_MFN(maps_32), PGT_PGFLAGS_32); + + idx = PGD_INDEX_32(XEN_IPT); + emu_pgd_32[idx] = get_pgentry(mfn_guest + init_pt_pfn, + EMU_PGFLAGS | _PAGE_PSE); + + m2p = (void*)XEN_M2P_32; +} -- 1.6.0.2