* Avi Kivity <[EMAIL PROTECTED]> wrote:

> > Does this look good to you? I'd like the basic API to be as light as
> > possible.
>
> Won't 32-bit and 64-bit pick different registers?
>
> We can work around it (call is_long_mode() when decoding the
> hypercall), but it kind of defeats the purpose of the optimization,
> no?
Well, we can standardize on the 32-bit calling convention: eax, ecx,
edx, ebp, etc. We can do that from the 64-bit asm as well, so it should
end up the same, I think - just that a 32-bit guest on a 64-bit host
won't be able to set the high bits of those registers.

I've attached my current patch (on top of the tarball) - that's one idea
of how it could look. Note that I didn't introduce a syscall function
table in kvm_handle_hypercall() - this is the better solution as long as
the number of hypercalls stays relatively low. That way we only prepare
the parameters that are truly needed.

You are right in that we cannot call the syscall functions directly and
that in practice we'll shuffle things around - but we have to check the
first parameter anyway, and the shuffling isn't /that/ big of a problem.
I wanted to keep the guest side as low-impact as possible, so that a
native kernel's instruction sequence is not disturbed too much by the
presence of a NOP hypercall.

In fact it would probably be more logical to use the standard syscall
register order: eax, ebx, ecx, edx, esi, edi, ebp? (A quick sketch of
such a macro is appended after the patch.)

	Ingo

Index: linux/arch/i386/kernel/paravirt.c
===================================================================
--- linux.orig/arch/i386/kernel/paravirt.c
+++ linux/arch/i386/kernel/paravirt.c
@@ -888,29 +888,40 @@ asm (
 	" ret \n"
 );
 
-extern unsigned char hypercall_addr[4];
+extern unsigned char hypercall_addr[6];
 
-
-static inline int
-kvm_hypercall(void *param1, void *param2, void *param3, void *param4)
-{
-	int ret = -1;
-
-	asm (" call hypercall_addr\n"
-		: "=g" (ret)
-		: "eax" (param1),
-		  "ecx" (param2),
-		  "edx" (param3),
-		  "ebp" (param4));
-
-	return ret;
-}
+#define hypercall1(nr)						\
+({								\
+	int __ret;						\
+								\
+	asm (" call hypercall_addr\n"				\
+		: "=g" (__ret)					\
+		: "eax" (nr)					\
+	);							\
+	__ret;							\
+})
+
+#define hypercall2(nr, p1)					\
+({								\
+	int __ret;						\
+								\
+	asm (" call hypercall_addr\n"				\
+		: "=g" (__ret)					\
+		: "eax" (nr),					\
+		  "ecx" (p1)					\
+	);							\
+	__ret;							\
+})
 
 void test_hypercall(void)
 {
-	int ret = kvm_hypercall((void *)1, (void *)2, (void *)3, (void *)4);
+	int ret;
+
+	ret = hypercall1(__NR_hypercall_load_cr3);
+	printk(KERN_DEBUG "hypercall test #1, ret: %d\n", ret);
 
-	printk(KERN_DEBUG "hypercall test, ret: %d\n", ret);
+	ret = hypercall2(0xbad, 0xbad);
+	printk(KERN_DEBUG "hypercall test #2, ret: %d\n", ret);
 }
 
 int kvm_guest_register_para(int cpu)
Index: linux/drivers/kvm/kvm.h
===================================================================
--- linux.orig/drivers/kvm/kvm.h
+++ linux/drivers/kvm/kvm.h
@@ -639,4 +639,6 @@ static inline u32 get_rdx_init_val(void)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
+extern int kvm_handle_hypercall(struct kvm_vcpu *vcpu);
+
 #endif
Index: linux/drivers/kvm/kvm_main.c
===================================================================
--- linux.orig/drivers/kvm/kvm_main.c
+++ linux/drivers/kvm/kvm_main.c
@@ -1138,6 +1138,32 @@ int emulate_instruction(struct kvm_vcpu
 }
 EXPORT_SYMBOL_GPL(emulate_instruction);
 
+int hypercall_load_cr3(struct kvm_vcpu *vcpu, unsigned long new_cr3)
+{
+	printk("not yet\n");
+
+	return -ENOSYS;
+}
+
+int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
+{
+	int nr = vcpu->regs[VCPU_REGS_RAX];
+	int ret = -EINVAL;
+
+	switch (nr) {
+	case __NR_hypercall_load_cr3:
+
+		ret = hypercall_load_cr3(vcpu, vcpu->regs[VCPU_REGS_RCX]);
+		break;
+	default:
+		printk(KERN_DEBUG "invalid hypercall %d\n", nr);
+	}
+	vcpu->regs[VCPU_REGS_RAX] = ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_hypercall);
+
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 {
 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
Index: linux/drivers/kvm/vmx.c
===================================================================
--- linux.orig/drivers/kvm/vmx.c
+++ linux/drivers/kvm/vmx.c
@@ -1032,7 +1032,7 @@ static int vmcs_setup_cr3_cache(struct k
 	cr3_target_values = (msr_val >> 16) & ((1 << 10) - 1);
 	printk(KERN_DEBUG " cr3 target values: %d\n", cr3_target_values);
 	if (cr3_target_values > KVM_CR3_CACHE_SIZE) {
-		printk(KERN_WARN "KVM: limiting cr3 cache size from %d to %d\n",
+		printk(KERN_WARNING "KVM: limiting cr3 cache size from %d to %d\n",
 			cr3_target_values, KVM_CR3_CACHE_SIZE);
 		cr3_target_values = KVM_CR3_CACHE_SIZE;
 	}
@@ -1726,16 +1726,12 @@ static int handle_halt(struct kvm_vcpu *
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	kvm_run->exit_reason = KVM_EXIT_DEBUG;
-	printk(KERN_DEBUG "got vmcall at RIP %08lx\n", vmcs_readl(GUEST_RIP));
-	printk(KERN_DEBUG "vmcall params: %08lx, %08lx, %08lx, %08lx\n",
-		vcpu->regs[VCPU_REGS_RAX],
-		vcpu->regs[VCPU_REGS_RCX],
-		vcpu->regs[VCPU_REGS_RDX],
-		vcpu->regs[VCPU_REGS_RBP]);
-	vcpu->regs[VCPU_REGS_RAX] = 0;
+	kvm_handle_hypercall(vcpu);
 	vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3);
+
 	return 1;
 }
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
Index: linux/include/linux/kvm_para.h
===================================================================
--- linux.orig/include/linux/kvm_para.h
+++ linux/include/linux/kvm_para.h
@@ -72,4 +72,7 @@ struct kvm_vcpu_para_state {
 
 #define KVM_EINVAL EINVAL
 
+#define __NR_hypercall_load_cr3	0
+#define __NR_hypercalls		1
+
 #endif
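
For illustration only (not part of the patch above): a rough sketch of
what a guest-side macro could look like with the standard syscall
register order floated at the end of the mail. The "a"/"b"/"c" strings
are the single-letter GCC constraint names for eax/ebx/ecx, the
hypercall3 name is made up here, and the macro assumes the
hypercall_addr stub declared in paravirt.c:

#define hypercall3(nr, p1, p2)					\
({								\
	int __ret;						\
								\
	/* nr in eax, arguments in ebx/ecx, result back in eax */ \
	asm volatile (" call hypercall_addr\n"			\
		: "=a" (__ret)					\
		: "a" (nr), "b" (p1), "c" (p2)			\
		: "memory");					\
	__ret;							\
})

The host side would then only have to pick the arguments out of
VCPU_REGS_RBX/VCPU_REGS_RCX in kvm_handle_hypercall(), in the same place
where it reads VCPU_REGS_RCX today.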