All,
Attached is a copy of kernel/host-linux.c, with some mods
which allow it to compile on a 2.4 kernel (linux-2.4.0-test7).
I looked through the mods from Ken and Frank. Got some
ideas there, though the implementations didn't look
like they'd work.
I have _not_ tried this yet. I only downloaded the
2.4 kernel and compiled plex86 against it. I don't
have a running kernel. Use at your own risk.
It would be cool if some daring types would give it a spin
and let me know if it works. I don't want to
commit changes to CVS until then.
Scanning through the 2.4 header files, this is just my
best guess.
-Kevin
/*
* plex86: run multiple x86 operating systems concurrently
* Copyright (C) 1999-2000 Kevin P. Lawton
*
* host-linux.c: Linux specific VM host driver functionality
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "plex86.h"
#define IN_HOST_SPACE
#include "monitor.h"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/wrapper.h>
#include <linux/version.h>
#include <asm/irq.h>
#ifndef VERSION_CODE
# define VERSION_CODE(vers,rel,seq) ( ((vers)<<16) | ((rel)<<8) | (seq) )
#endif
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,0)
# include <asm/uaccess.h>
#endif
#include <asm/io.h>
/************************************************************************/
/* Compatibility macros for older kernels */
/************************************************************************/
#ifndef EXPORT_NO_SYMBOLS
# define EXPORT_NO_SYMBOLS register_symtab(NULL)
#endif
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,29)
# define proc_register_dynamic proc_register
#endif
#if LINUX_VERSION_CODE < VERSION_CODE(2,2,0)
#define NEED_RESCHED need_resched
#else
#define NEED_RESCHED current->need_resched
#endif
#if LINUX_VERSION_CODE < VERSION_CODE(2,1,0)
/* Pre-2.1 compatibility shim: emulate the modern copy_from_user()
 * on top of verify_area() + memcpy_fromfs().  Returns 0 on success,
 * or the (negative) verify_area() error code on failure. */
static inline unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
  int err;

  err = verify_area(VERIFY_READ, from, n);
  if (err != 0)
    return err;
  memcpy_fromfs(to, from, n);
  return 0;
}
/* Pre-2.1 compatibility shim: emulate the modern copy_to_user()
 * on top of verify_area() + memcpy_tofs().  Returns 0 on success,
 * or the (negative) verify_area() error code on failure. */
static inline unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
  int err;

  err = verify_area(VERIFY_WRITE, to, n);
  if (err != 0)
    return err;
  memcpy_tofs(to, from, n);
  return 0;
}
#endif
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,18) && !defined(THIS_MODULE)
/* Starting with version 2.1.18, the __this_module symbol is present,
but the THIS_MODULE #define was introduced much later ... */
#define THIS_MODULE (&__this_module)
#endif
/************************************************************************/
/* Declarations */
/************************************************************************/
/* Use dynamic major number allocation. (Set non-zero for static allocation) */
#define PLEX86_MAJOR 0
static int plex_major = PLEX86_MAJOR;
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,18)
MODULE_PARM(plex_major, "i");
MODULE_PARM_DESC(plex_major, "major number (default " __MODULE_STRING(PLEX86_MAJOR)
")");
#endif
/* The kernel segment base */
#if LINUX_VERSION_CODE < VERSION_CODE(2,1,0)
# define KERNEL_OFFSET 0xc0000000
#else
# define KERNEL_OFFSET 0x00000000
#endif
/* File operations */
static int plex86_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
static int plex86_open(struct inode *, struct file *);
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,31)
static int plex86_release(struct inode *, struct file *);
#else
static void plex86_release(struct inode *, struct file *);
#endif
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,0)
static int plex86_mmap(struct file * file, struct vm_area_struct * vma);
#else
static int plex86_mmap(struct inode * inode, struct file * file, struct
vm_area_struct * vma);
#endif
/************************************************************************/
/* Structures / Variables */
/************************************************************************/
static int retrieve_vm_pages(Bit32u *page, int max_pages, void *addr, unsigned size);
static unsigned retrieve_phy_pages(Bit32u *page, int max_pages, void *addr, unsigned
size);
static int retrieve_monitor_pages(void);
static void reserve_guest_pages(vm_t *vm);
static void unreserve_guest_pages(vm_t *vm);
monitor_pages_t monitor_pages;
extern unsigned redir_cnt[256];
static struct file_operations plex86_fops;
/* For the /proc/plex86 entry */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,4,0) /* XXX - How far back? */
int plex86_read_procmem(char *, char **, off_t, int);
#else
int plex86_read_procmem(char *, char **, off_t, int, int);
#endif
#if LINUX_VERSION_CODE < VERSION_CODE(2,3,25)
static struct proc_dir_entry plex86_proc_entry = {
0, /* dynamic inode */
6, "plex86", /* len, name */
S_IFREG | S_IRUGO, /* mode */
1, 0, 0,
0,
NULL,
&plex86_read_procmem, /* read function */
};
#endif
/************************************************************************/
/* Main kernel module code */
/************************************************************************/
/*
 * Module entry point.  Registers the plex86 character device and its
 * /proc entry, then locates the physical pages backing the module
 * itself (the monitor must know them) and probes the CPU.
 * Returns 0 on success, or a negative errno; on failure everything
 * registered so far is torn down again via the goto labels below.
 */
int
init_module(void)
{
  int err;

  /* clear uninitialised structures */
  memset(redir_cnt, 0, sizeof(redir_cnt));
  memset(&monitor_pages, 0, sizeof(monitor_pages));

  /* fill in the file operation entries we support */
  memset(&plex86_fops, 0, sizeof(plex86_fops));
  plex86_fops.mmap = plex86_mmap;
  plex86_fops.ioctl = plex86_ioctl;
  plex86_fops.open = plex86_open;
  plex86_fops.release = plex86_release;

  /* register the device with the kernel */
  err = register_chrdev(plex_major, "plex86", &plex86_fops);
  if (err < 0) {
    printk(KERN_WARNING "plex86: can't get major %d\n", plex_major);
    return(err);
  }
  /* If this was a dynamic allocation, save the major for
   * the release code
   */
  if(!plex_major)
    plex_major = err;

  /* register the /proc entry; kernels >= 2.3.25 use the new
   * create_proc_info_entry() API, older ones proc_register(). */
#ifdef CONFIG_PROC_FS
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,25)
  if (!create_proc_info_entry("plex86", 0, NULL, plex86_read_procmem))
    printk(KERN_ERR "plex86: registering /proc/plex86 failed\n");
#else
  proc_register_dynamic(&proc_root, &plex86_proc_entry);
#endif
#endif

  /* retrieve the monitor physical pages */
  if (!retrieve_monitor_pages()) {
    printk("retrieve_monitor_pages returned error\n");
    err = -EINVAL;
    goto fail_retrieve_pages;
  }
  if (!get_cpu_capabilities()) {
    printk("get_cpu_capabilities returned error\n");
    err = -EINVAL;
    goto fail_cpu_capabilities;
  }
  else {
    /* Disabled debug dump of the CPUID signature fields. */
#if 0
    printk(KERN_WARNING "ptype:%u, family:%u, model:%u stepping:%u\n",
           cpuid_info.procSignature.fields.procType,
           cpuid_info.procSignature.fields.family,
           cpuid_info.procSignature.fields.model,
           cpuid_info.procSignature.fields.stepping);
#endif
  }
  /* Disabled debug dump of the host CR0 register.
   * NOTE(review): if re-enabled, 'vm' is not in scope here. */
#if 0
  {
    Bit32u cr0;
    asm volatile (
      "movl %%cr0, %0"
      : "=r" (cr0)
      );
    monprint(vm, "host CR0=0x%x\n", cr0);
  }
#endif

  /* success */
  EXPORT_NO_SYMBOLS;
  return(0);

fail_cpu_capabilities:
fail_retrieve_pages:
  /* unregister /proc entry */
#ifdef CONFIG_PROC_FS
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,25)
  remove_proc_entry("plex86", NULL);
#else
  proc_unregister(&proc_root, plex86_proc_entry.low_ino);
#endif
#endif
  /* unregister device */
  unregister_chrdev(plex_major, "plex86");
  return err;
}
/*
 * Module exit point.  Undoes what init_module() set up: unregisters
 * the character device and removes the /proc/plex86 entry (using the
 * API matching the running kernel version).
 */
void
cleanup_module(void)
{
  /* unregister device */
  unregister_chrdev(plex_major, "plex86");

  /* unregister /proc entry */
#ifdef CONFIG_PROC_FS
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,25)
  remove_proc_entry("plex86", NULL);
#else
  proc_unregister(&proc_root, plex86_proc_entry.low_ino);
#endif
#endif
}
/************************************************************************/
/* Open / Release a VM */
/************************************************************************/
int
plex86_open(struct inode *inode, struct file *filp)
{
vm_t *vm;
MOD_INC_USE_COUNT;
/* allocate a VM structure */
if ( (vm = vmalloc(sizeof(*vm))) == NULL )
return -ENOMEM;
memset( vm, 0, sizeof(*vm) );
filp->private_data = vm;
/* Set defaults of tweakable parameters */
vm->prescanDepth = PrescanDepthDefault;
vm->mon_state = MON_STATE_UNINITIALIZED;
return(0);
}
/*
 * release() handler: frees the per-descriptor vm_t allocated in
 * plex86_open() and drops the module use count.  Kernels >= 2.1.31
 * expect an int return from release; older ones expect void.
 */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,31)
int
#else
void
#endif
plex86_release(struct inode *inode, struct file *filp)
{
  vm_t *vm = (vm_t *)filp->private_data;

  filp->private_data = NULL;

  /* free the virtual memory */
  unreserve_guest_pages( vm );
  unalloc_vm_pages( vm );

  /* free the VM structure (scrubbed first, since it held guest CPU
   * state) */
  memset( vm, 0, sizeof(*vm) );
  vfree( vm );
  MOD_DEC_USE_COUNT;
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,31)
  return(0);
#endif
}
/************************************************************************/
/* VM operations: ioctl() and mmap() */
/************************************************************************/
/*
 * ioctl() handler: the user-space control interface of the plex86
 * device.  All commands operate on the vm_t attached to this file
 * descriptor (filp->private_data).  'arg' is either a small integer
 * parameter or a user-space pointer, depending on the command.
 * Returns 0 on success or a negative errno.
 */
int
plex86_ioctl(struct inode *inode, struct file *filp,
             unsigned int cmd, unsigned long arg)
{
  vm_t *vm = (vm_t *)filp->private_data;
  unsigned error;

  switch (cmd)
  {
    /*
     * Allocate unpaged memory for the VM.
     * arg is the number of megabytes to allocate
     * Memory returned must not be pageable by the
     * host OS, since the VM monitor will run in this
     * memory as well.  Perhaps later, we can let
     * the guest OS run in paged memory and reflect
     * the page faults back to the host OS.
     */
    case PLEX86_ALLOCVPHYS:
      {
      guest_cpu_t guest_cpu;

      /* Do not allow duplicate allocation */
      if (vm->mon_state != MON_STATE_UNINITIALIZED)
        return -EBUSY;
      if (vm->pages.guest_n_megs != 0)
        return -EBUSY;

      /* Check that the amount of memory is reasonable: at least 4MB,
       * at most PLEX86_MAX_PHY_MEGS, and a multiple of 4MB. */
      if ( arg > PLEX86_MAX_PHY_MEGS
           || arg < 4
           || (arg & ~0x3) != arg )
        return -EINVAL;

      /* Allocate memory */
      if ( (error = alloc_vm_pages(vm, arg)) != 0 ) {
        printk(KERN_WARNING "plex86: alloc_vm_pages failed at %u\n",
               error);
        return -ENOMEM;
      }

      /* Mark guest pages as reserved (for mmap()) */
      reserve_guest_pages( vm );

      /* Initialize the guests physical memory. */
      if ( init_guest_phy_mem(vm) ) {
        unreserve_guest_pages(vm);
        unalloc_vm_pages(vm);
        return -EFAULT;
      }
      get_cpu_reset_values(&guest_cpu);

      /* Initialize the monitor */
      printk(KERN_WARNING "cpu.cr0 = 0x%x\n", guest_cpu.cr0);
      if ( init_monitor(vm, KERNEL_OFFSET, 0, &guest_cpu) ||
           init_guest_cpu(vm, 0, &guest_cpu) )
      {
        unreserve_guest_pages(vm);
        unalloc_vm_pages(vm);
        return -EFAULT;
      }
      /* Cast: sizeof yields a size_t, which "%u" does not match. */
      printk(KERN_WARNING "plex86: vm_t size is %u\n",
             (unsigned) sizeof(vm_t));
      return 0;
      }

    /*
     * Tear down VM environment
     */
    case PLEX86_TEARDOWN:
      /* Do *not* free pages that are still mapped to user space! */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,99)
      /* Not sure when this changed.  If you know, email us. */
      if (inode->i_data.i_mmap != NULL)
#else
      if (inode->i_mmap != NULL)
#endif
      {
        printk(KERN_WARNING "plex86: guest memory is still mapped!\n");
        return -EBUSY;
      }
      unreserve_guest_pages( vm );
      unalloc_vm_pages( vm );
      return 0;

    /*
     * Allocate an interrupt for forwarding to the user monitor.
     * (Currently disabled: the unconditional return below makes the
     * rest of this case unreachable.)
     */
    case PLEX86_ALLOCINT:
      return -EINVAL; /* xxx */
      /* Check that we allocate a valid interrupt.  Valid vectors are
       * 0..255; the bitmap has exactly 256 bits, so 256 itself must
       * be rejected too (was an off-by-one: 'arg > 256'). */
      if (arg >= 256)
        return -EINVAL;
      /* Allocate the interrupt */
      BMAP_SET(vm->host_fwd_ints, arg);
      return 0;

    /*
     * Release an interrupt for forwarding to the user monitor.
     * (Currently disabled, same as PLEX86_ALLOCINT.)
     */
    case PLEX86_RELEASEINT:
      return -EINVAL; /* xxx */
      /* check that we release a valid interrupt (0..255; see above) */
      if (arg >= 256)
        return -EINVAL;
      /* release the interrupt */
      BMAP_CLR(vm->host_fwd_ints, arg);
      return 0;

    /*
     * Set the prescan depth tweakable, range-checked.
     */
    case PLEX86_PRESCANDEPTH:
      if ( (arg < PrescanDepthMin) || (arg > PrescanDepthMax) ) {
        printk(KERN_WARNING "plex86: Requested prescan depth %lu"
               " out of range [%u..%u]\n", arg, PrescanDepthMin, PrescanDepthMax);
        return -EINVAL;
      }
      vm->prescanDepth = arg;
      return 0;

    /*
     * Set or clear the INTR line
     */
    case PLEX86_SETINTR:
      ioctlSetIntr(vm, arg);
      return 0;

    /*
     * Get guest context structure; arg is a user pointer to a
     * guest_context_t to fill in.
     */
    case PLEX86_GETCONTEXT:
      {
      guest_context_t context;

      /* +++ need state check here */
      get_guest_context(vm, &context);
      if (copy_to_user((void *)arg, &context, sizeof(context)))
        return -EFAULT;
      return 0;
      }

    /*
     * Set guest context structure; arg is a user pointer to a
     * guest_context_t to read.
     */
    case PLEX86_SETCONTEXT:
      {
      guest_context_t context;

      if (vm->mon_state != MON_STATE_RUNNABLE)
        return -EINVAL;
      if (copy_from_user(&context, (void *)arg, sizeof(context)))
        return -EFAULT;
      set_guest_context(vm, &context);
      return 0;
      }

    /*
     * Main message loop entry point.  The header is copied in first so
     * msg_len can be validated before copying the variable-size body.
     */
    case PLEX86_MESSAGEQ:
      {
      vm_messages_t msg;

      if (vm->mon_state != MON_STATE_RUNNABLE)
        return -EINVAL;
      if (copy_from_user(&msg.header, (void *)arg, sizeof(msg.header)))
        return -EFAULT;
      /* Reject bodies that would overflow the kernel-side buffer. */
      if ( (msg.header.msg_len + sizeof(msg.header)) > sizeof(msg))
        return -EINVAL;
      if (msg.header.msg_len)
        if (copy_from_user(&msg.msg, &((vm_messages_t *)arg)->msg,
                           msg.header.msg_len))
          return -EFAULT;
      if (ioctlMessageQ(vm, &msg)) {
        printk(KERN_WARNING "plex86: ioctlMessageQ failed\n");
        return -EINVAL;
      }
      if (copy_to_user((void *)arg, &msg,
                       sizeof(msg.header) + msg.header.msg_len))
        return -EFAULT;
      return 0;
      }

    /*
     * for debugging, when the module gets hosed, this is a way
     * to reset the in-use count, so we can rmmod it.
     */
    case PLEX86_RESET:
      while (MOD_IN_USE)
        MOD_DEC_USE_COUNT;
      MOD_INC_USE_COUNT; /* bump back to 1 so release can decrement */
      return 0;

    /*
     * Re-initialize the guest CPU to its reset values.
     */
    case PLEX86_RESET_CPU:
      {
      guest_cpu_t guest_cpu;

      if (vm->mon_state != MON_STATE_RUNNABLE)
        return -EINVAL;
      get_cpu_reset_values(&guest_cpu);
      if ( init_guest_cpu(vm, 0, &guest_cpu) ) {
        return -EFAULT;
      }
      vm->mon_state = MON_STATE_RUNNABLE;
      return 0;
      }

    /*
     * Copy the guest CPU state out to user space.  Allowed while
     * runnable or after a monitor panic (for post-mortem debugging).
     */
    case PLEX86_GET_CPU:
      {
      guest_cpu_t guest_cpu;

      if ( (vm->mon_state != MON_STATE_RUNNABLE) &&
           (vm->mon_state != MON_STATE_PANIC) )
        return -EINVAL;
      get_guest_cpu_state(vm, &guest_cpu);
      if (copy_to_user((void *)arg, &guest_cpu, sizeof(guest_cpu)))
        return -EFAULT;
      return 0;
      }

    /*
     * Load a guest CPU state supplied by user space.
     */
    case PLEX86_SET_CPU:
      {
      guest_cpu_t guest_cpu;

      if (vm->mon_state != MON_STATE_RUNNABLE)
        return -EINVAL;
      if (copy_from_user(&guest_cpu, (void *)arg, sizeof(guest_cpu)))
        return -EFAULT;
      printk(KERN_WARNING "cpu.cr0 = 0x%x\n", guest_cpu.cr0);
      if ( init_guest_cpu(vm, 0, &guest_cpu) ) {
        return -EFAULT;
      }
      vm->mon_state = MON_STATE_RUNNABLE;
      return 0;
      }

    /*
     * Debug aid: force interrupt vector 'arg' on the next guest entry.
     * The 0x100 bit flags the request as pending.
     */
    case PLEX86_FORCE_INT:
      {
      if (vm->mon_state != MON_STATE_RUNNABLE)
        return -EINVAL;
      vm->dbg_force_int = 0x100 | arg;
      return 0;
      }

    default:
      printk(KERN_WARNING "plex86: unknown ioctl(%d) called\n", cmd);
      return -EINVAL;
  }
}
/*
 * mmap() handler: maps the guest physical pages (allocated via the
 * PLEX86_ALLOCVPHYS ioctl) into the caller's address space.  Only
 * shared, page-aligned, in-range mappings are accepted.  Returns 0 on
 * success or a negative errno.  Kernels >= 2.3.25 express the offset
 * in pages (vm_pgoff); older ones in bytes (vm_offset).
 */
int
#if LINUX_VERSION_CODE >= VERSION_CODE(2,1,0)
plex86_mmap(struct file * file, struct vm_area_struct * vma)
#else
/* Pre-2.1 kernels pass the inode explicitly. */
plex86_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
#endif
{
  vm_t *vm = (vm_t *)file->private_data;
  int i, firstpage, nr_pages;

  /* Must have memory allocated */
  if (!vm->pages.guest_n_pages) {
    printk(KERN_WARNING "plex86: device not initialized\n");
    return -EACCES;
  }

  /* Private mappings make no sense ... */
  if ( !(vma->vm_flags & VM_SHARED) ) {
    printk(KERN_WARNING "plex86: private mapping\n");
    return -EINVAL;
  }

#if LINUX_VERSION_CODE < VERSION_CODE(2,3,25)
  /* To simplify things, allow only page-aligned offsets */
  if ( vma->vm_offset & (PAGE_SIZE - 1) ) {
    printk(KERN_WARNING "plex86: unaligned offset %08lx\n", vma->vm_offset);
    return -EINVAL;
  }
#endif

  /* Sanity check: the requested range must lie inside the allocated
   * guest memory. */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,25)
  /* NOTE(review): vm_pgoff is unsigned, so the '< 0' test is always
   * false; harmless, the upper-bound test does the real work. */
  if ( vma->vm_pgoff < 0
       || vma->vm_pgoff + ((vma->vm_end - vma->vm_start) / PAGE_SIZE)
       > vm->pages.guest_n_pages)
  {
    printk(KERN_WARNING "plex86: offset page %08lx out of range\n", vma->vm_pgoff);
    return -EINVAL;
  }
#else
  /* Sanity check */
  if ( vma->vm_offset < 0
       || vma->vm_offset + (vma->vm_end - vma->vm_start)
       > vm->pages.guest_n_pages * PAGE_SIZE )
  {
    printk(KERN_WARNING "plex86: offset %08lx out of range\n", vma->vm_offset);
    return -EINVAL;
  }
#endif

  /* Map all requested guest pages in ... */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,3,25)
  firstpage = vma->vm_pgoff;
#else
  firstpage = vma->vm_offset / PAGE_SIZE;
#endif
  nr_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
  /* guest[] holds physical page frame numbers; << 12 converts to a
   * physical byte address for remap_page_range(). */
  for ( i = 0; i < nr_pages; i++ )
    if ( remap_page_range( vma->vm_start + i*PAGE_SIZE,
                           vm->pages.guest[firstpage+i] << 12,
                           PAGE_SIZE,
                           vma->vm_page_prot ) )
      return -EAGAIN;

#if LINUX_VERSION_CODE < VERSION_CODE(2,1,0)
  /* Enter our inode into the VMA; no need to change the default ops */
  vma->vm_inode = inode;
  inode->i_count++;
#endif
  return 0;
}
/************************************************************************/
/* Status reporting: /proc code */
/************************************************************************/
/*
 * /proc/plex86 read handler (old-style proc interface).  Dumps the
 * non-zero monitor-to-host interrupt redirection counters into buf and
 * returns the number of bytes written.  The offset/start parameters
 * are ignored -- the output is assumed to fit into the single page the
 * kernel hands us.  The incoming 'len' is discarded and reused as the
 * running output length.
 */
int
plex86_read_procmem(char *buf, char **start, off_t offset,
#if LINUX_VERSION_CODE >= VERSION_CODE(2,4,0)
                    int len)
#else
                    int len, int unused)
#endif
{
  unsigned i;

  len = 0;
  len += sprintf(buf, "monitor-->host interrupt reflection counts\n");
  for (i=0; i<256; i++) {
    if (redir_cnt[i])
      len += sprintf(buf+len, " 0x%2x:%10u\n", i, redir_cnt[i]);
  }
  return(len);
}
/************************************************************************/
/* Paging management */
/************************************************************************/
/*
 * Walk the host page tables by hand to find the physical page frame
 * numbers backing the virtual range [addr, addr+size), storing them in
 * page[].  Returns the number of pages found, or 0 on error.
 *
 * The walk reads CR3 directly and indexes a classic two-level 32-bit
 * page table (>>22 for the PDE, bits 21..12 for the PTE); it assumes
 * 4KB pages.  NOTE(review): this layout does not hold under PAE --
 * confirm the target kernels are non-PAE.
 */
static int
retrieve_vm_pages(Bit32u *page, int max_pages, void *addr, unsigned size)
{
  /*
   * Grrr.  There doesn't seem to be an exported mechanism to retrieve
   * the physical pages underlying a vmalloc()'ed area.  We do it the
   * hard way ...
   */
  pageEntry_t *host_pgd;
  Bit32u host_cr3;
  /* Round the start down and the end up to page boundaries. */
  Bit32u start_addr = (Bit32u)addr & ~(PAGESIZE-1);
  int n_pages = ((Bit32u)addr + size - start_addr + PAGESIZE-1) >> 12;
  int i;

  if (!addr) {
    printk(KERN_WARNING "plex86: retrieve_vm_pages: addr NULL!\n");
    return 0;
  }
  if ( size > (max_pages*4096) ) {
    printk(KERN_WARNING "plex86: retrieve_vm_pages: not enough pages!\n");
    return 0;
  }
  /* Locate the page directory from CR3. */
  asm volatile("movl %%cr3, %0" : "=r" (host_cr3));
  host_pgd = (pageEntry_t *)(phys_to_virt(host_cr3 & ~0xfff));
  for (i = 0; i < n_pages; i++)
  {
    /* KERNEL_OFFSET compensates for the kernel segment base on
     * pre-2.1 kernels (see the #define above). */
    Bit32u virt_addr = start_addr + i*PAGESIZE + KERNEL_OFFSET;
    pageEntry_t *pde = host_pgd + (virt_addr >> 22);
    pageEntry_t *pte = (pageEntry_t *)phys_to_virt(pde->fields.base << 12)
      + ((virt_addr >> 12) & 0x3ff);
    /* If page isn't present, assume end of area */
    if ( !pde->fields.P || ! pte->fields.P )
    {
      n_pages = i;
      break;
    }
    /* Abort if our page list is too small */
    if (i >= max_pages)
    {
      printk(KERN_WARNING "plex86: page list is too small!\n");
      printk(KERN_WARNING "n_pages=%u, max_pages=%u\n",
             n_pages, max_pages);
      return 0;
    }
    page[i] = pte->fields.base;
  }
  return n_pages;
}
/*
 * Locate the physical pages that hold this module's own image and
 * record them in the global monitor_pages structure (the monitor runs
 * out of module memory, so it must know these frames).  Returns the
 * page count, or 0 on failure.
 */
static int
retrieve_monitor_pages(void)
{
  /*
   * Retrieve start address and size of this module.
   *
   * Note that with old kernels, we cannot access the module info (size),
   * hence we rely on the fact that Linux lets at least one page of
   * virtual address space unused after the end of the module.
   */
#ifdef THIS_MODULE
  void *start_addr = THIS_MODULE;
  unsigned size = THIS_MODULE->size;
#else
  /* Old kernels: start from the use-count symbol and let
   * retrieve_vm_pages() stop at the first non-present page. */
  void *start_addr = &mod_use_count_;
  unsigned size = 0x10000000; /* Actual size determined below */
#endif
  int n_pages;

  n_pages = retrieve_vm_pages(monitor_pages.page, PLEX86_MAX_MONITOR_PAGES,
                              start_addr, size);
  printk(KERN_WARNING "%u monitor pages located\n", n_pages);
  monitor_pages.start_addr = (Bit32u)start_addr;
  monitor_pages.n_pages = n_pages;
  return n_pages;
}
/*
 * Mark every guest physical page of this VM as reserved so it can be
 * mapped to user space and will not be swapped out.
 */
static void
reserve_guest_pages(vm_t *vm)
{
  vm_pages_t *pg = &vm->pages;
  unsigned p;

  /*
   * As we want to map these pages to user space, we need to mark
   * them as 'reserved' pages by setting the PG_reserved bit.
   *
   * This has the effect that:
   * - remap_page_range accepts them as candidates for remapping
   * - the swapper does *not* try to swap these pages out, even
   *   after they are mapped to user space
   */
  for (p = 0; p < pg->guest_n_pages; p++)
    /* 2.4 kernels take a struct page *; older ones a frame number. */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,4,0)
    mem_map_reserve( (mem_map + pg->guest[p]) );
#else
    mem_map_reserve(pg->guest[p]);
#endif
}
/*
 * Undo reserve_guest_pages(): clear the PG_reserved bit on every guest
 * page before the memory is handed back to the kernel.
 */
static void
unreserve_guest_pages(vm_t *vm)
{
  vm_pages_t *pg = &vm->pages;
  unsigned p;

  /* Remove the PG_reserved flags before returning the pages */
  for (p = 0; p < pg->guest_n_pages; p++)
    /* 2.4 kernels take a struct page *; older ones a frame number. */
#if LINUX_VERSION_CODE >= VERSION_CODE(2,4,0)
    mem_map_unreserve( (mem_map + pg->guest[p]) );
#else
    mem_map_unreserve(pg->guest[p]);
#endif
}
/*
 * Variant of retrieve_vm_pages() for a page-aligned buffer: fill
 * page[] with the physical frame numbers backing [addr_v,
 * addr_v+size).  Unlike retrieve_vm_pages(), a non-present page is an
 * error here, not an end marker.  Returns the page count, or 0 on
 * error.  Same two-level, non-PAE page-table assumption as above.
 */
static unsigned
retrieve_phy_pages(Bit32u *page, int max_pages, void *addr_v, unsigned size)
{
  /*
   * Grrr.  There doesn't seem to be an exported mechanism to retrieve
   * the physical pages underlying a vmalloc()'ed area.  We do it the
   * hard way ...
   */
  pageEntry_t *host_pgd;
  Bit32u host_cr3;
  /*Bit32u start_addr = (Bit32u)addr & ~(PAGESIZE-1); */
  /*int n_pages = ((Bit32u)addr + size - start_addr + PAGESIZE-1) >> 12; */
  int i;
  Bit8u *addr;
  unsigned n_pages;

  addr = (Bit8u *) addr_v;
  /* The buffer must start on a page boundary. */
  if ( ((Bit32u)addr) & 0xfff ) {
    printk(KERN_WARNING "plex86: retrieve_phy_pages: not aligned!\n");
    return 0;
  }
  n_pages = (size + PAGESIZE - 1) >> 12;
  if (!addr) {
    printk(KERN_WARNING "plex86: retrieve_phy_pages: addr NULL!\n");
    return 0;
  }
  /* NOTE(review): unsigned n_pages vs int max_pages -- a negative
   * max_pages would be promoted to a huge unsigned value here; callers
   * currently always pass positive counts. */
  if ( n_pages > max_pages ) {
    printk(KERN_WARNING "plex86: retrieve_phy_pages: size conflict\n");
    return 0;
  }
  /* Locate the page directory from CR3. */
  asm volatile("movl %%cr3, %0" : "=r" (host_cr3));
  host_pgd = (pageEntry_t *)(phys_to_virt(host_cr3 & ~0xfff));
  for (i = 0; i < n_pages; i++) {
    Bit32u laddr;
    pageEntry_t *pde;
    pageEntry_t *pte;

    /* KERNEL_OFFSET compensates for the kernel segment base on
     * pre-2.1 kernels. */
    laddr = KERNEL_OFFSET + ((Bit32u) addr);
    pde = host_pgd + (laddr >> 22);
    pte = ((pageEntry_t *)phys_to_virt(pde->fields.base << 12))
      + ((laddr >> 12) & 0x3ff);
    if ( !pde->fields.P || ! pte->fields.P ) {
      printk(KERN_WARNING "retrieve_phy_pages: P==0\n");
      return 0;
    }
    page[i] = pte->fields.base;
    addr += 4096;
  }
  return(n_pages);
}
/************************************************************************/
/* Miscellaneous callbacks */
/************************************************************************/
/*
 * Flush the monitor's log buffer to the kernel log and reset its
 * bookkeeping.  If the buffer is busy (monprint is writing to it),
 * leave it entirely alone.
 */
void
host_log_event(vm_t *vm)
{
  /* Only touch the buffer while it is in its inactive state. */
  if ( !vm->log_buffer_info.quiescent )
    return;

  if (!vm->log_buffer_info.error) {
    /* Normal case: report wrap-around, if any, then the messages. */
    if (vm->log_buffer_info.wrap_count) {
      printk(KERN_WARNING "plex86: log buffer wrapped %u time(s)\n",
             vm->log_buffer_info.wrap_count);
    }
    printk(KERN_WARNING "plex86: log message(s):\n%s\n",
           vm->host.addr.log_buffer);
  }
  else {
    /* Error case: dump the bookkeeping plus the raw format string. */
    printk(KERN_WARNING "plex86: log buffer error!\n");
    printk(KERN_WARNING " offset=%u\n", vm->log_buffer_info.offset);
    printk(KERN_WARNING " wrap_count=%u\n", vm->log_buffer_info.wrap_count);
    printk(KERN_WARNING "plex86: log message fmt:\n%s\n",
           vm->host.addr.log_buffer);
  }

  /* Clear all bookkeeping for the next batch of messages. */
  vm->log_buffer_info.error = 0;
  vm->log_buffer_info.offset = 0;
  vm->log_buffer_info.wrap_count = 0;
  vm->log_buffer_info.event = 0;
}
/*
 * Called by the monitor when the guest is idle: yield the CPU if the
 * scheduler wants it (NEED_RESCHED expands per kernel version).
 * Returns non-zero while it is safe to keep idling, 0 once the current
 * process has received a fatal signal.
 */
unsigned
host_idle(void)
{
  if (NEED_RESCHED)
    schedule();
  return !current_got_fatal_signal();
}
/* Host-memory allocation callback for the monitor: vmalloc()-backed,
 * so the memory is virtually contiguous only.  Pair with host_free(). */
void *
host_alloc(unsigned long size)
{
  return vmalloc(size);
}
/* Release memory obtained from host_alloc(). */
void
host_free(void *ptr)
{
  vfree(ptr);
}
/* Monitor callback: look up the physical frames backing the buffer
 * [ptr, ptr+size) into page[].  Returns the page count, 0 on error. */
unsigned
host_map(Bit32u *page, int max_pages, void *ptr, unsigned size)
{
  return( retrieve_phy_pages(page, max_pages, ptr, size) );
}
/* Allocate one physically contiguous page (may sleep: GFP_KERNEL).
 * Returns its kernel virtual address, or NULL. */
void *
host_alloc_page(void)
{
  return (void *)get_free_page(GFP_KERNEL);
}
/* Release a page obtained from host_alloc_page(). */
void
host_free_page(void *ptr)
{
  free_page((Bit32u)ptr);
}
/* Translate a kernel virtual address (from the direct-mapped region)
 * to its physical page frame number; 0 for a NULL pointer. */
Bit32u
host_map_page(void *ptr)
{
  if (!ptr) return 0;
  /* return MAP_NR(ptr); */
  return(__pa(ptr) >> PAGE_SHIFT);
}
/* Debug hook: logged when the monitor finds the host running with
 * interrupts disabled (IF=0). */
void
doit_up(vm_t *vm)
{
  printk(KERN_WARNING "Humm, host had IF=0\n");
}
/* Debug hook: marker printk only. */
void
doit2(void)
{
  printk(KERN_WARNING "doit2\n");
}
/* Monitor callback for an unrecoverable condition: just logs it. */
void
host_oh_crap(vm_t *vm)
{
  printk(KERN_WARNING "Oh Crap!\n");
}