[PATCH] kvm: external module: do not hardcode tsc_khz

2009-03-25 Thread Avi Kivity
From: Marcelo Tosatti mtosa...@redhat.com

The external module compat code hardcodes tsc_khz as 200 for kernels older
than KERNEL_VERSION(2,6,23).

This breaks kvmclock on hosts with a different TSC frequency.

While tsc_khz was only exported starting with 2.6.23, the majority of relevant
older v2.6-based distros seem to have it exported.

Signed-off-by: Marcelo Tosatti mtosa...@redhat.com
Signed-off-by: Avi Kivity a...@redhat.com

diff --git a/kernel/external-module-compat-comm.h 
b/kernel/external-module-compat-comm.h
index a14cea2..e0dc577 100644
--- a/kernel/external-module-compat-comm.h
+++ b/kernel/external-module-compat-comm.h
@@ -387,15 +387,11 @@ static inline struct page *__kvm_vm_fault(struct 
vm_area_struct *vma,
 #endif
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-
-static unsigned  __attribute__((__used__)) kvm_tsc_khz = 200;
-
-#else
+extern unsigned int tsc_khz;
+#endif
 
 #define kvm_tsc_khz tsc_khz
 
-#endif
-
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
 
 #include <linux/ktime.h>
--
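
The net effect is easier to see without the diff markers; after the change the
compat header boils down to roughly this (a sketch of the resulting state, not
the verbatim file):

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
/* tsc_khz exists on these kernels too, it just is not declared for
 * modules everywhere, so declare it instead of inventing a frequency */
extern unsigned int tsc_khz;
#endif

#define kvm_tsc_khz tsc_khz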


[PATCH] kvm: extboot: Update number of HDs reported by BIOS

2009-03-25 Thread Avi Kivity
From: Gleb Natapov g...@redhat.com

The Vista boot loader expects the BIOS to report a correct count of
bootable disks, and extboot disks are bootable.

This fixes the Vista boot-from-virtio-blk issue.

Signed-off-by: Gleb Natapov g...@redhat.com
Signed-off-by: Avi Kivity a...@redhat.com

diff --git a/extboot/extboot.S b/extboot/extboot.S
index e3d1adf..1e60f68 100644
--- a/extboot/extboot.S
+++ b/extboot/extboot.S
@@ -32,6 +32,9 @@ _start:
xor %ax, %ax
mov %ax, %ds
 
+   /* there is one more bootable HD */
+   incb 0x0475
+
/* save old int 19 */
mov (0x19*4), %eax
mov %eax, %cs:old_int19
--
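
The byte being incremented is the hard-disk count in the BIOS Data Area
(linear address 0x475), which int 0x13/AH=0x08 and boot loaders consult. A
tiny illustrative check from Linux userspace (not part of the patch; needs
root and reads the legacy first megabyte through /dev/mem):

/* read the BDA fixed-disk count at 0x475 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    unsigned char count;
    int fd = open("/dev/mem", O_RDONLY);

    if (fd < 0) { perror("open /dev/mem"); return 1; }
    if (pread(fd, &count, 1, 0x475) != 1) { perror("pread"); close(fd); return 1; }
    printf("BIOS Data Area reports %u hard disk(s)\n", count);
    close(fd);
    return 0;
}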


[PATCH] KVM: VMX: Zero the vpid module parameter if vpid is not supported

2009-03-25 Thread Avi Kivity
From: Avi Kivity a...@redhat.com

This allows reading back how the hardware is configured.

Signed-off-by: Avi Kivity a...@redhat.com

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 47b94ae..8b1b9b8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1202,6 +1202,9 @@ static __init int setup_vmcs_config(struct vmcs_config 
*vmcs_conf)
  vmx_capability.ept, vmx_capability.vpid);
}
 
+   if (!cpu_has_vmx_vpid())
+   enable_vpid = 0;
+
min = 0;
 #ifdef CONFIG_X86_64
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
@@ -2082,7 +2085,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
int vpid;
 
vmx->vpid = 0;
-   if (!enable_vpid || !cpu_has_vmx_vpid())
+   if (!enable_vpid)
return;
spin_lock(vmx_vpid_lock);
vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
--
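
For reference, the reason zeroing the parameter matters is that module
parameters declared with a readable mode show up under
/sys/module/<module>/parameters/, so after this change the value read back
reflects what the hardware actually allowed. A minimal sketch of the pattern
(illustrative names, not the kvm-intel source):

#include <linux/init.h>
#include <linux/module.h>

static bool enable_widget = true;
module_param(enable_widget, bool, 0444);        /* readable via sysfs */

static bool hw_supports_widget(void)
{
        return false;   /* stand-in for a real CPUID/VMX capability probe */
}

static int __init demo_init(void)
{
        if (!hw_supports_widget())
                enable_widget = false;  /* readback now matches reality */
        return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");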


[PATCH] KVM: Fix interrupt unhalting a vcpu when it shouldn't

2009-03-25 Thread Avi Kivity
From: Gleb Natapov g...@redhat.com

kvm_vcpu_block() unhalts a vcpu on an interrupt/timer without checking
whether the interrupt window is actually open.

Signed-off-by: Gleb Natapov g...@redhat.com
Signed-off-by: Avi Kivity a...@redhat.com
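
The generic halt loop is meant to use the new hook so that a vcpu is only
woken for an interrupt when the architecture says it could actually be
delivered. Roughly (a sketch of the intent, with a hypothetical helper name,
not the exact virt/kvm change):

/* wake a halted vcpu only when the pending event is deliverable */
static bool vcpu_should_wake(struct kvm_vcpu *vcpu)
{
        if (kvm_cpu_has_pending_timer(vcpu))
                return true;
        if (kvm_cpu_has_interrupt(vcpu) &&
            kvm_arch_interrupt_allowed(vcpu))   /* the new per-arch check */
                return true;
        return false;
}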

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index fcfb110..d8f43e4 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1961,6 +1961,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
return 0;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   /* do real check here */
+   return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
return vcpu->arch.timer_fired;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9057335..2cf915e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -41,6 +41,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
return !!(v->arch.pending_exceptions);
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   /* do real check here */
+   return 1;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
return !(v->arch.msr & MSR_WE);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0189356..4ed4c3a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -318,6 +318,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
return rc;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   /* do real check here */
+   return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
return 0;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4627627..8351c4d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -521,7 +521,7 @@ struct kvm_x86_ops {
void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
   struct kvm_run *run);
-
+   int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
int (*get_mt_mask_shift)(void);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 04ee964..1fcbc17 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2268,6 +2268,15 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
 }
 
+static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   struct vmcb *vmcb = svm->vmcb;
+   return (vmcb->save.rflags & X86_EFLAGS_IF) &&
+   !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+   (svm->vcpu.arch.hflags & HF_GIF_MASK);
+}
+
 static void svm_intr_assist(struct kvm_vcpu *vcpu)
 {
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2647,6 +2656,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.exception_injected = svm_exception_injected,
.inject_pending_irq = svm_intr_assist,
.inject_pending_vectors = do_interrupt_requests,
+   .interrupt_allowed = svm_interrupt_allowed,
 
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index da6461d..b9e06b0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2490,6 +2490,12 @@ static void vmx_update_window_states(struct kvm_vcpu 
*vcpu)
 GUEST_INTR_STATE_MOV_SS)));
 }
 
+static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   vmx_update_window_states(vcpu);
+   return vcpu->arch.interrupt_window_open;
+}
+
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
   struct kvm_run *kvm_run)
 {
@@ -3691,7 +3697,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.exception_injected = vmx_exception_injected,
.inject_pending_irq = vmx_intr_assist,
.inject_pending_vectors = do_interrupt_requests,
-
+   .interrupt_allowed = vmx_interrupt_allowed,
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
.get_mt_mask_shift = vmx_get_mt_mask_shift,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cdfe1b..ae4918c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4468,3 +4468,8 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
put_cpu();
 }
+
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+   return kvm_x86_ops->interrupt_allowed(vcpu);
+}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 11eb702..095ebb6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -298,6 +298,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int 

Re: [PATCH 1/1] KVM: Correct wrong vmcs reading

2009-03-25 Thread Avi Kivity

Sheng Yang wrote:

Some 64bit reading should be natural wide reading...
  


Applied, thanks.

--
error compiling committee.c: too many arguments to function

--


Re: KVM on Via Nano (Isaiah) CPUs? Virus checked

2009-03-25 Thread Andreas Tanz
 Avi Kivity wrote:
 
  Will talk to the specification and come up with further tests.
 
 
 Please printk() vmcs_readl(GUEST_RFLAGS) (where you printk kvm_rip_read()).
 

vmx.c:
2637 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2638 {
...
2687 error_code = 0;
2688 rip = kvm_rip_read(vcpu);
2689 unsigned long debug_guest_rflags = vmcs_readl(GUEST_RFLAGS);
2690 printk(KERN_ERR "vmx-handle_exception 0a : kvm_rip_read(vcpu) returned 0x%lx ; vmcs_readl(GUEST_RFLAGS) returned 0x%lx\n", rip, debug_guest_rflags);
2691 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
2692 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
...

# dmesg
...
[63930.809295] returning from kvm_handle_exit, cause 3, retval = 0, exit_reason 
= 30
[63930.809313] vmx-vmx_vcpu_run() 00 : vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) 
returned 0x0
[63930.809326] returning from kvm_handle_exit, cause 3, retval = 0, exit_reason 
= 30
[63930.809344] vmx-vmx_vcpu_run() 00 : vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) 
returned 0x0
[63930.809358] returning from kvm_handle_exit, cause 3, retval = 0, exit_reason 
= 30
[63930.809376] vmx-vmx_vcpu_run() 00 : vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) 
returned 0x0
[63930.809388] returning from kvm_handle_exit, cause 3, retval = 1, exit_reason 
= 7
[63930.809399] vmx-vmx_vcpu_run() 00 : vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) 
returned 0x8408
[63930.809412] vmx-handle_exception 00 : giving some infos
[63930.809417] vmx-handle_exception 01 : vect_info: 0x0
[63930.809423] vmx-handle_exception 02 : intr_info: 0x8b0d, 
is_page_fault()==0
[63930.809429] vmx-handle_exception 03 : irq_chip_in_kernel()==1
[63930.809434] vmx-handle_exception 04 : is_external_interrupt()==0
[63930.809440] vmx-handle_exception 0a : kvm_rip_read(vcpu) returned 0x3154 ; 
vmcs_readl(GUEST_RFLAGS) returned 0x33202 
[63930.809447] vmx-handle_exception 0f : vcpu-arch.rmode.active: 0x1
[63930.809454] vmx-handle_exception 10 : handle_rmode_exception(vcpu, 
intr_info  INTR_INFO_VECTOR_MASK, error_code) returned 0x1
[63930.809462] returning from kvm_handle_exit, cause 3, retval = 1, exit_reason 
= 0
[63930.809474] vmx-vmx_vcpu_run() 00 : vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) 
returned 0x840d
--


[PATCH] KVM: IA64: enable external interrupt in vmm

2009-03-25 Thread Zhang, Yang
From 2cf75eff171ef823b3b17c945504d0233a6bd427 Mon Sep 17 00:00:00 2001
From: Yang Zhang yang.zh...@intel.com
Date: Mon, 23 Mar 2009 03:31:04 -0400
Subject: [PATCH] KVM: IA64: enable external interrupt in vmm

In the previous version, the interrupt bit is cleared while in
the vmm. This patch sets the bit so that external interrupts can
be handled while in the vmm. It will improve I/O performance.

Signed-off-by: Yang Zhang yang.zh...@intel.com
---
 arch/ia64/kvm/process.c |5 +
 arch/ia64/kvm/vmm_ivt.S |   18 +-
 arch/ia64/kvm/vtlb.c|3 +++
 3 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index b1dc809..a8f84da 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -652,20 +652,25 @@ void  kvm_ia64_handle_break(unsigned long ifa, struct 
kvm_pt_regs *regs,
unsigned long isr, unsigned long iim)
 {
struct kvm_vcpu *v = current_vcpu;
+   long psr;
 
if (ia64_psr(regs)->cpl == 0) {
/* Allow hypercalls only when cpl = 0.  */
if (iim == DOMN_PAL_REQUEST) {
+   local_irq_save(psr);
set_pal_call_data(v);
vmm_transition(v);
get_pal_call_result(v);
vcpu_increment_iip(v);
+   local_irq_restore(psr);
return;
} else if (iim == DOMN_SAL_REQUEST) {
+   local_irq_save(psr);
set_sal_call_data(v);
vmm_transition(v);
get_sal_call_result(v);
vcpu_increment_iip(v);
+   local_irq_restore(psr);
return;
}
}
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ef1a01..40920c6 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
;;
srlz.i// guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.
addl r...@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
-   //(p15)ssm psr.i   // restore psr.i
+   (p15)ssm psr.i   // restore psr.i
addl r...@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
;;
srlz.i // guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.i
adds r3=8,r2// set up second base pointer
;;
KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.i
addl r...@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
;;
srlz.i   // guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.i
addl r...@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
;;
srlz.i   // guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.i
addl r...@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;;
srlz.i// guarantee that interruption collection is on
;;
-   //(p15) ssm psr.i   // restore psr.i
+   (p15) ssm psr.i   // restore psr.i
addl r...@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
;;
srlz.i
;;
-   //(p15) ssm psr.i
+   (p15) ssm psr.i
addl r...@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret =   r24
;;
 (p7)srlz.i
;;
-//(p6)ssm psr.i
+(p6)ssm psr.i
;;
mov rp=rpsave
mov ar.pfs=pfssave
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 38232b3..d46e267 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -253,7 +253,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
(p7) st8 [%2]=r9;;
 

Re: Split kvm source tarballs

2009-03-25 Thread Anthony Liguori

Avi Kivity wrote:

Anthony Liguori wrote:


Hrm, I notice that for qemu-kvm, you basically just pulled out the 
module source.




That's step 1.

What do you think about going a step further and building a proper 
qemu repository?




Step 2.  I have scripts that do that and will merge with Eduardo's 
script that also rewrites the qemu commit hashes to correspond to 
qemu.git on git.kernel.org.


That's what I figured.  FWIW, the split tarballs work just fine for me.

It may be worth waiting to do step 2 until the IO thread is merged.  I 
think once that happens, we could probably do a sprint to get rid of 
libkvm in kvm-userspace.  That would certainly simplify things.


Regards,

Anthony Liguori
--


kvm-guest-drivers-linux-1 virtio_net crash with 2.6.18.8 kernel

2009-03-25 Thread Takeshi Sone
Hello,

I built kvm-guest-drivers-linux-1 on vanilla 2.6.18.8 kernel.
virtio_blk worked fine.
However virtio_net crashes right after enabling the device with ifconfig.

This happens when the net device is connected to host tap.
It works fine with -net user.

Host is kvm-84 on 2.6.28 (Ubuntu Jaunty).


Crash dump #1:

sarge:~# ifconfig eth0 up
BUG: unable to handle kernel paging request at virtual address ed8ae9b4
 printing eip:
e084f3ef
*pde = 
Oops: 0002 [#1]
SMP
Modules linked in: virtio_net ipv6 thermal fan button processor ac
battery e1000 ide_generic ide_core ext3 jbd mbcache virtio_blk
virtio_pci virtio_ring virtio
CPU:0
EIP:0060:[e084f3ef]Not tainted VLI
EFLAGS: 00010202   (2.6.18.8 #1)
EIP is at virtnet_poll+0x6f/0x1b0 [virtio_net]
eax: ed8ae9b4   ebx: dfd3bc00   ecx: dfed8800   edx: 2c5b835d
esi: de432adc   edi: de432ac0   ebp: 0001   esp: c175be50
ds: 007b   es: 007b   ss: 0068
Process ifconfig (pid: 939, ti=c175a000 task=dfafdaa0 task.ti=c175a000)
Stack: c018bbab 0010 c175be7c de432800 00ae de432800 
c1405a94
   c1405a80 c026e9df 39b6 012c 0001 c037bf58 c03bee20
c03bd2c0
   c0127e69  000a 0246  1043 
c0127eb5
Call Trace:
 [c018bbab] mntput_no_expire+0x1b/0x90
 [c026e9df] net_rx_action+0x8f/0x1d0
 [c0127e69] __do_softirq+0xc9/0xe0
 [c0127eb5] do_softirq+0x35/0x40
 [c0127d91] local_bh_enable_ip+0x41/0x50
 [c026d5ba] dev_open+0x4a/0x80
 [c026f1d7] dev_change_flags+0x47/0x110
 [c02b3fe9] devinet_ioctl+0x239/0x550
 [c02b5f53] inet_ioctl+0x53/0x80
 [c0263890] sock_ioctl+0xd0/0x200
 [c015f56a] __handle_mm_fault+0x10a/0x230
 [c02637c0] sock_ioctl+0x0/0x200
 [c0182297] do_ioctl+0x77/0x90
 [c018245e] vfs_ioctl+0x5e/0x1d0
 [c0118cd2] do_page_fault+0x142/0x550
 [c018262b] sys_ioctl+0x5b/0x90
 [c0103103] syscall_call+0x7/0xb
Code: 73 41 8b 47 04 8d 54 24 10 8b 48 08 ff 51 08 85 c0 89 c3 74 2e ff
4e 08 45 8b 13 c7 03 00 00 00 00 8b 43 04 c7 43 04 00 00 00 00 89 10
89 42 04 8b 4c 24 10 89 da 8b 47 0c e8 2e fc ff ff ff 4f
EIP: [e084f3ef] virtnet_poll+0x6f/0x1b0 [virtio_net] SS:ESP 0068:c175be50
 0Kernel panic - not syncing: Fatal exception in interrupt


Crash dump #2:


sarge:~# ifconfig eth1 up
BUG: warning at include/linux/skbuff.h:1006/__skb_trim()
 [e09921dd] receive_skb+0x1ad/0x1c0 [virtio_net]
 [e0992402] virtnet_poll+0x82/0x1b0 [virtio_net]
 [c018e7bb] mntput_no_expire+0x1b/0x90
 [c025c9d9] net_rx_action+0x99/0x250
 [c0129209] __do_softirq+0xc9/0xe0
 [c0129255] do_softirq+0x35/0x40
 [c012913d] local_bh_enable_ip+0x4d/0x50
 [c025b4fa] dev_open+0x4a/0x80
 [c025d247] dev_change_flags+0x47/0x110
 [c029f6e9] devinet_ioctl+0x239/0x550
 [c02a17b3] inet_ioctl+0x53/0x80
 [c0251590] sock_ioctl+0xc0/0x1f0
 [c01614e8] __handle_mm_fault+0x108/0x230
 [c02514d0] sock_ioctl+0x0/0x1f0
 [c0184b87] do_ioctl+0x77/0x90
 [c0184d4e] vfs_ioctl+0x5e/0x1d0
 [c0119de2] do_page_fault+0x342/0x530
 [c0184f1b] sys_ioctl+0x5b/0x90
 [c010323f] syscall_call+0x7/0xb
BUG: unable to handle kernel paging request at virtual address 69643a70
 printing eip:
c02693a1
*pde = 
Oops:  [#1]
PREEMPT SMP
Modules linked in: virtio_net ipv6 thermal fan button processor ac
battery tsdev ide_cd cdrom floppy psmouse rtc i2c_piix4 i2c_core piix
generic ide_core e1000 ext3 jbd mbcache virtio_blk virtio_pci
virtio_ring virtio
CPU:0
EIP:0060:[c02693a1]Not tainted VLI
EFLAGS: 00010a06   (2.6.18.8 #1)
EIP is at eth_type_trans+0x31/0x100
eax: 69643a7e   ebx: df26b9c0   ecx: 69643a70   edx: de665800
esi: 69643a70   edi: df26b9c0   ebp: 0001   esp: de5d5e20
ds: 007b   es: 007b   ss: 0068
Process ifconfig (pid: 1828, ti=de5d4000 task=de5aacf0 task.ti=de5d4000)
Stack: df26b9c0 df26b9f0 de665800 e0992084 e0992b90 e0992bac 03ee
e0992aaa
   df26b9c0 de665c1c de665c00 e0992402 c018e7bb 0010 de5d5e7c
de665800
   00bf de665800  c1406618 c1406600 c025c9d9 1d76
012c
Call Trace:
 [e0992084] receive_skb+0x54/0x1c0 [virtio_net]
 [e0992402] virtnet_poll+0x82/0x1b0 [virtio_net]
 [c018e7bb] mntput_no_expire+0x1b/0x90
 [c025c9d9] net_rx_action+0x99/0x250
 [c0129209] __do_softirq+0xc9/0xe0
 [c0129255] do_softirq+0x35/0x40
 [c012913d] local_bh_enable_ip+0x4d/0x50
 [c025b4fa] dev_open+0x4a/0x80
 [c025d247] dev_change_flags+0x47/0x110
 [c029f6e9] devinet_ioctl+0x239/0x550
 [c02a17b3] inet_ioctl+0x53/0x80
 [c0251590] sock_ioctl+0xc0/0x1f0
 [c01614e8] __handle_mm_fault+0x108/0x230
 [c02514d0] sock_ioctl+0x0/0x1f0
 [c0184b87] do_ioctl+0x77/0x90
 [c0184d4e] vfs_ioctl+0x5e/0x1d0
 [c0119de2] do_page_fault+0x342/0x530
 [c0184f1b] sys_ioctl+0x5b/0x90
 [c010323f] syscall_call+0x7/0xb
Code: 88 a0 00 00 00 89 48 24 8b 40 60 83 f8 0d 76 18 83 e8 0e 3b 47 64
89 47 60 0f 82 c4 00 00 00 8d 41 0e 89 87 a0 00 00 00 8b 77 24 f6 06
01 74 7a 0f b7 82 38 01 00 00 8d 9a 38 01 00 00 0f b7 0e
EIP: [c02693a1] eth_type_trans+0x31/0x100 SS:ESP 0068:de5d5e20
 0Kernel panic - not syncing: Fatal exception in interrupt


Crash dump #3:


sarge:~# ifconfig 

Re: kvm: qemu: stop/start cpus before/after devices

2009-03-25 Thread Marcelo Tosatti
On Wed, Mar 25, 2009 at 01:45:52PM +0200, Avi Kivity wrote:
 Marcelo Tosatti wrote:
 From: Yaniv Kamay ya...@qumranet.com

 Stop cpus before devices when stopping the VM, start cpus after devices
 when starting VM.

   

 Why is this needed?

A vcpu could access a stopped device otherwise. 
--


Re: kvm: qemu: stop/start cpus before/after devices

2009-03-25 Thread Marcelo Tosatti
On Wed, Mar 25, 2009 at 11:26:19AM -0300, Marcelo Tosatti wrote:
 On Wed, Mar 25, 2009 at 01:45:52PM +0200, Avi Kivity wrote:
  Marcelo Tosatti wrote:
  From: Yaniv Kamay ya...@qumranet.com
 
  Stop cpus before devices when stopping the VM, start cpus after devices
  when starting VM.
 

 
  Why is this needed?
 
 A vcpu could access a stopped device otherwise. 

Actually on vm_stop it's safe because the order happens to be correct,
but on vm_start it's the other way around (vcpus start first, and they
should be started last).
--


Re: kvm: qemu: stop/start cpus before/after devices

2009-03-25 Thread Avi Kivity

Marcelo Tosatti wrote:

On Wed, Mar 25, 2009 at 11:26:19AM -0300, Marcelo Tosatti wrote:
  

On Wed, Mar 25, 2009 at 01:45:52PM +0200, Avi Kivity wrote:


Marcelo Tosatti wrote:
  

From: Yaniv Kamay ya...@qumranet.com

Stop cpus before devices when stopping the VM, start cpus after devices
when starting VM.

  


Why is this needed?
  
A vcpu could access a stopped device otherwise. 



Actually on vm_stop it's safe because the order happens to be correct,
but on vm_start it's the other way around (vcpus start first, and they
should be started last).
  


Right, applied the patch, thanks.

--
error compiling committee.c: too many arguments to function

--


KVM Port

2009-03-25 Thread kvm port
Hi KVM Gurus,

We have an EVB with an FPGA-based RISC processor with VT support.
As a proof of concept I have to port KVM onto it; we have Linux running as of now.
Can any of you help with how I should begin?

-thanks n regards
kvport bunch
--


[PATCH] compute checksum for roms bigger than a segment

2009-03-25 Thread Glauber Costa
Some option roms (the e1000 rom provided by the gPXE project, for example)
are bigger than a segment. The current algorithm to compute the
checksum fails in that case. To compute the checksum properly, this
patch deals with the possibility of the rom's size crossing a
segment border.

We don't need to worry about it crossing more than one segment
border, since the option rom format only saves one byte to store
the image size in units of 512-byte blocks (thus, maximum size = 0xff
blocks, just under 128k, i.e. at most 2 segments).
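
A rough C model of the arithmetic may make the fix easier to follow
(illustration only; the real code is 16-bit rombios assembly, where the byte
loop can only address 64k before the data segment has to be advanced):

#include <stdint.h>
#include <stdio.h>

/* sum 'size' bytes the way real mode must: at most 64k per data segment */
static uint8_t rom_checksum(const uint8_t *image, uint32_t size)
{
        uint8_t sum = 0;
        uint32_t off = 0;

        while (size) {
                uint32_t chunk = size > 0x10000 ? 0x10000 : size;
                uint32_t i;

                for (i = 0; i < chunk; i++)
                        sum += image[off + i];  /* 8-bit wrap, like 'add al,[bx]' */
                off  += chunk;                  /* in the BIOS: ds += 0x1000 */
                size -= chunk;
        }
        return sum;                             /* a valid rom sums to 0 */
}

int main(void)
{
        /* 0xff 512-byte blocks: the largest size the header byte can express */
        static uint8_t image[0xff * 512];
        uint32_t len = sizeof(image);

        image[2] = 0xff;                        /* size-in-blocks field */
        image[len - 1] = (uint8_t)-rom_checksum(image, len - 1);
        printf("sum over %u bytes = %u\n", len, rom_checksum(image, len));
        return 0;
}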

[ including improvements suggested by malc ]

Signed-off-by: Glauber Costa glom...@redhat.com
---
 bios/rombios.c |   33 +++--
 1 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/bios/rombios.c b/bios/rombios.c
index bc43251..5e7ad24 100644
--- a/bios/rombios.c
+++ b/bios/rombios.c
@@ -10162,22 +10162,43 @@ no_serial:
   ret
 
 rom_checksum:
-  push ax
-  push bx
-  push cx
+  pusha
+  push ds
+
   xor  ax, ax
   xor  bx, bx
   xor  cx, cx
+  xor  dx, dx
+
   mov  ch, [2]
   shl  cx, #1
+
+  jnc checksum_loop
+  xchg dx, cx
+  dec  cx
+
 checksum_loop:
   add  al, [bx]
   inc  bx
   loop checksum_loop
+
+  test dx, dx
+  je checksum_out
+
+  add  al, [bx]
+  mov  cx, dx
+  mov  dx, ds
+  add  dh, #0x10
+  mov  ds, dx
+  xor  dx, dx 
+  xor  bx, bx
+
+  jmp  checksum_loop
+
+checksum_out:
   and  al, #0xff
-  pop  cx
-  pop  bx
-  pop  ax
+  pop  ds
+  popa 
   ret
 
 
-- 
1.6.2

--


[PATCH] kvm-autotest: write relative path in html report

2009-03-25 Thread Ryan Harper
When generating an html report from make_html_report.py, one needs to
supply the full path to the results directory.  This value ends up being
embedded in the output which breaks relocating the results dir to a
different path.  This patch adds a new flag that suppresses the full path
value when generating the report.  I'm looking to generate a summary
html report in the results dir and relocate the results dir to a
different server which can be done with this patch.

-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
ry...@us.ibm.com

diffstat output:
 make_html_report.py |   12 ++--
 1 files changed, 10 insertions(+), 2 deletions(-)

Signed-off-by: Ryan Harper ry...@us.ibm.com
---
diff --git a/client/tests/kvm_runtest_2/make_html_report.py 
b/client/tests/kvm_runtest_2/make_html_report.py
index ddb896f..3d0326a 100755
--- a/client/tests/kvm_runtest_2/make_html_report.py
+++ b/client/tests/kvm_runtest_2/make_html_report.py
@@ -1655,8 +1655,9 @@ def get_kvm_version(result_dir):
 def main(argv):
 dirname = None
 output_file_name = None
+relative_path = False
 try:
-opts, args = getopt.getopt(argv, "r:f:h", ['help'])
+opts, args = getopt.getopt(argv, "r:f:h:R", ['help'])
 except getopt.GetoptError:
 usage()
 sys.exit(2)
@@ -1668,10 +1669,17 @@ def main(argv):
 dirname =  arg
 elif opt == '-f':
 output_file_name =  arg
+elif opt == '-R':
+relative_path = True
 else:
 usage()
 sys.exit(1)
 
+html_path = dirname
+# don't use absolute path in html output if relative flag passed
+if relative_path:
+html_path = ''
+
 if dirname:
 if os.path.isdir(dirname): # TBD: replace it with a validation of 
autotest result dir
 res_dir = os.path.abspath(dirname)
@@ -1704,7 +1712,7 @@ def main(argv):
 'kvmver':get_kvm_version(dirname)
 }
 
-make_html_file(metalist, results_data, tag, host, 
output_file_name, dirname)
+make_html_file(metalist, results_data, tag, host, 
output_file_name, html_path)
 sys.exit(0)
 else:
 print 'Invalid result directory %s' % dirname
--


IO on guest is 20 times slower than host

2009-03-25 Thread Kurt Yoder

Hello list,

I'm puzzled as to why my KVM guest has IO that is 20 times slower than  
the KVM host:


m...@guest:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 8.36612 s, 6.1 MB/s
m...@guest:~$ df -h
FilesystemSize  Used Avail Use% Mounted on
/dev/vda1 9.2G  587M  8.2G   7% /
tmpfs 123M 0  123M   0% /lib/init/rw
varrun123M   28K  123M   1% /var/run
varlock   123M 0  123M   0% /var/lock
udev  123M  2.6M  120M   3% /dev
tmpfs 123M 0  123M   0% /dev/shm



m...@host:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 0.47188 s, 109 MB/s



The VM is started via libvirt and is the only VM running on an  
otherwise-unused system:


root 19915 1  0 Mar24 ?00:00:00   /usr/sbin/libvirtd -d
nobody   19937 19915  0 Mar24 ?00:00:00 dnsmasq --keep-in- 
foreground --strict-order --bind-interfaces --pid-file  --conf-file  -- 
listen-address 192.168.122.1 --except-interface lo --dhcp-leasefile=/ 
var/lib/libvirt/dhcp-default.leases --dhcp-range  
192.168.122.2,192.168.122.254
root 24354 19915  2 09:52 ?00:02:33 /usr/bin/kvm -S -M  
pc -m 256 -smp 1 -name guest -monitor pty -boot c -drive file=/dev/ 
HW_RAID/Guest,if=virtio,index=0,boot=on -net  
nic,macaddr=52:54:00:ba:e6:db,vlan=0,model=virtio -net  
tap,fd=11,script=,vlan=0,ifname=vnet1 -serial none -parallel none -usb  
-vnc 127.0.0.1:1




Can someone suggest where to start looking? Thanks,

-Kurt
--


Re: Can't boot guest with more than 3585MB when using large pages

2009-03-25 Thread Marcelo Tosatti
On Tue, Mar 24, 2009 at 04:57:46PM -0500, Ryan Harper wrote:
 * Alex Williamson alex.william...@hp.com [2009-03-24 16:07]:
  
  On a 2.6.29, x86_64 host/guest, what's special about specifying a guest
  size of -m 3586 when using -mem-path backed by hugetlbfs?  3585 works,
  3586 hangs here:
  
  ...
  PCI-DMA: Using software bounce buffering for IO (SWIOTLB)
  Placing 64MB software IO TLB between 88002000 - 88002400
  software IO TLB at phys 0x2000 - 0x2400
  Memory: 3504832k/4196352k available (2926k kernel code, 524740k absent, 
  166780k reserved, 1260k data, 496k init)
  
  I can back -mem-path by tmpfs or disk and it works fine.  Also works
  with no -mem-path, but it would obviously be nice to benefit from large
  pages on big guests.  The system has plenty of huge pages to back the
  request, and booting with -mem-prealloc makes no difference.  Tested on
  latest git as of today.  Thanks,
 
 I've seen this as well, haven't had a chance to dig into the issue yet
 either.  Certainly can test patches if anyone has an idea of what's
 wrong here.

Can you strace and see if the mmap on hugetlbfs is correctly sized?

--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Avi Kivity

Kurt Yoder wrote:

Hello list,

I'm puzzled as to why my KVM guest has IO that is 20 times slower than 
the KVM host:


m...@guest:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 8.36612 s, 6.1 MB/s
m...@guest:~$ df -h
FilesystemSize  Used Avail Use% Mounted on
/dev/vda1 9.2G  587M  8.2G   7% /
tmpfs 123M 0  123M   0% /lib/init/rw
varrun123M   28K  123M   1% /var/run
varlock   123M 0  123M   0% /var/lock
udev  123M  2.6M  120M   3% /dev
tmpfs 123M 0  123M   0% /dev/shm



m...@host:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 0.47188 s, 109 MB/s



The VM is started via libvirt and is the only VM running on an 
otherwise-unused system:


root 19915 1  0 Mar24 ?00:00:00   /usr/sbin/libvirtd -d
nobody   19937 19915  0 Mar24 ?00:00:00 dnsmasq 
--keep-in-foreground --strict-order --bind-interfaces --pid-file  
--conf-file  --listen-address 192.168.122.1 --except-interface lo 
--dhcp-leasefile=/var/lib/libvirt/dhcp-default.leases --dhcp-range 
192.168.122.2,192.168.122.254
root 24354 19915  2 09:52 ?00:02:33 /usr/bin/kvm -S -M 
pc -m 256 -smp 1 -name guest -monitor pty -boot c -drive 
file=/dev/HW_RAID/Guest,if=virtio,index=0,boot=on -net 
nic,macaddr=52:54:00:ba:e6:db,vlan=0,model=virtio -net 
tap,fd=11,script=,vlan=0,ifname=vnet1 -serial none -parallel none -usb 
-vnc 127.0.0.1:1




Can someone suggest where to start looking? Thanks,


I get 141 MB/s on the same test (it only copies to memory anyway).  Is 
your VM under memory pressure?


--
error compiling committee.c: too many arguments to function

--


Re: KVM Port

2009-03-25 Thread Avi Kivity

kvm port wrote:

Hi KVM Gurus,

We have a EVB with a fpga based RISC processor with VT support.
As a proof of concept i have to port KVM onto it. we have run linux as of now.
can anyof u help with how should i begin

-thanks n regards
  


What's a EVB?

What do you mean by a RISC processor with VT support?  VT is an x86 
instruction set.


--
error compiling committee.c: too many arguments to function

--


Re: kvm-guest-drivers-linux-1 virtio_net crash with 2.6.18.8 kernel

2009-03-25 Thread Avi Kivity
Takeshi Sone wrote:
 Hello,

 I built kvm-guest-drivers-linux-1 on vanilla 2.6.18.8 kernel.
 virtio_blk worked fine.
 However virtio_net crashes right after enabling the device with ifconfig.

 This happens when the net device is connected to host tap.
 It works fine with -net user.

 Host is kvm-84 on 2.6.28 (Ubuntu Jaunty).

   

The kvm-guest-drivers-linux repository proved too difficult to maintain.
You might try a newer guest kernel which has virtio support, or if you
need a 2.6.18 kernel specifically, you might try to port the RHEL 5.3
virtio drivers (RHEL 5.3 is also 2.6.18 based).

-- 
error compiling committee.c: too many arguments to function

--


Re: Can't boot guest with more than 3585MB when using large pages

2009-03-25 Thread Alex Williamson
On Wed, 2009-03-25 at 13:10 -0300, Marcelo Tosatti wrote:
 On Tue, Mar 24, 2009 at 04:57:46PM -0500, Ryan Harper wrote:
  * Alex Williamson alex.william...@hp.com [2009-03-24 16:07]:
   
   On a 2.6.29, x86_64 host/guest, what's special about specifying a guest
   size of -m 3586 when using -mem-path backed by hugetlbfs?  3585 works,
   3586 hangs here:
   
   ...
   PCI-DMA: Using software bounce buffering for IO (SWIOTLB)
   Placing 64MB software IO TLB between 88002000 - 88002400
   software IO TLB at phys 0x2000 - 0x2400
   Memory: 3504832k/4196352k available (2926k kernel code, 524740k absent, 
   166780k reserved, 1260k data, 496k init)
  
  I've seen this as well, haven't had a chance to dig into the issue yet
  either.  Certainly can test patches if anyone has an idea of what's
  wrong here.
 
 Can you strace and see if the mmap on hugetlbfs is correctly sized?

Seems reasonable with some 2MB rounding.

Failing case, -m 3586:

open(/hugepages//kvm.5fuuH5, O_RDWR|O_CREAT|O_EXCL, 0600) = 9
unlink(/hugepages//kvm.5fuuH5)= 0
ftruncate(9, 3783262208)= 0
mmap(NULL, 3783262208, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE, 9, 0) = 
0x7f37a5e0

Working case, -m 3585:

open(/hugepages//kvm.Mv6Zgd, O_RDWR|O_CREAT|O_EXCL, 0600) = 9
unlink(/hugepages//kvm.Mv6Zgd)= 0
ftruncate(9, 3781165056)= 0
mmap(NULL, 3781165056, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE, 9, 0) = 
0x7fd44b80

Working case using disk backing: -mem-path /tmp -mem-prealloc -m 3586:

open(/tmp/kvm.nPlxl1, O_RDWR|O_CREAT|O_EXCL, 0600) = 9
unlink(/tmp/kvm.nPlxl1)   = 0
ftruncate(9, 3783262208)= 0
mmap(NULL, 3783262208, PROT_READ|PROT_WRITE, MAP_PRIVATE, 9, 0) = 0x7f432e055000
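
Both hugetlbfs sizes above are exact multiples of the 2MB huge page size,
which matches the rounding mentioned earlier. For reference, a self-contained
sketch of the allocation pattern being traced (illustrative, not the qemu
code; assumes a hugetlbfs mount at /hugepages and 2MB pages):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)

int main(int argc, char **argv)
{
        unsigned long mb = argc > 1 ? strtoul(argv[1], NULL, 0) : 3586;
        unsigned long len = (mb * 1024 * 1024 + HPAGE_SIZE - 1)
                            & ~(HPAGE_SIZE - 1);     /* round up to 2MB */
        char path[] = "/hugepages/kvm.XXXXXX";
        int fd = mkstemp(path);                      /* like qemu's kvm.XXXXXX */
        void *p;

        if (fd < 0) { perror("mkstemp"); return 1; }
        unlink(path);
        if (ftruncate(fd, len) < 0) { perror("ftruncate"); return 1; }
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_POPULATE, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        printf("mapped %lu bytes at %p\n", len, p);
        munmap(p, len);
        close(fd);
        return 0;
}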


--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Kurt Yoder


On Mar 25, 2009, at 12:13 PM, Avi Kivity wrote:


Kurt Yoder wrote:

Hello list,

I'm puzzled as to why my KVM guest has IO that is 20 times slower  
than the KVM host:


m...@guest:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 8.36612 s, 6.1 MB/s
m...@guest:~$ df -h
FilesystemSize  Used Avail Use% Mounted on
/dev/vda1 9.2G  587M  8.2G   7% /
tmpfs 123M 0  123M   0% /lib/init/rw
varrun123M   28K  123M   1% /var/run
varlock   123M 0  123M   0% /var/lock
udev  123M  2.6M  120M   3% /dev
tmpfs 123M 0  123M   0% /dev/shm



m...@host:~$ dd if=/dev/zero of=/tmp/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 0.47188 s, 109 MB/s



The VM is started via libvirt and is the only VM running on an  
otherwise-unused system:


root 19915 1  0 Mar24 ?00:00:00   /usr/sbin/ 
libvirtd -d
nobody   19937 19915  0 Mar24 ?00:00:00 dnsmasq --keep- 
in-foreground --strict-order --bind-interfaces --pid-file  --conf- 
file  --listen-address 192.168.122.1 --except-interface lo --dhcp- 
leasefile=/var/lib/libvirt/dhcp-default.leases --dhcp-range  
192.168.122.2,192.168.122.254
root 24354 19915  2 09:52 ?00:02:33 /usr/bin/kvm -S  
-M pc -m 256 -smp 1 -name guest -monitor pty -boot c -drive file=/ 
dev/HW_RAID/Guest,if=virtio,index=0,boot=on -net  
nic,macaddr=52:54:00:ba:e6:db,vlan=0,model=virtio -net  
tap,fd=11,script=,vlan=0,ifname=vnet1 -serial none -parallel none - 
usb -vnc 127.0.0.1:1




Can someone suggest where to start looking? Thanks,


I get 141 MB/s on the same test (it only copies to memory anyway).   
Is your VM under memory pressure?


I don't think so. The host has 128 GB of memory, and even the pre- 
emptive kernel caching doesn't come close to filling it:


m...@host:~$ free
 total   used   free sharedbuffers  
cached
Mem: 1320439601742480  130301480  0  84628  
826040

-/+ buffers/cache: 831812  131212148
Swap:  1048568  01048568

My /tmp should be physical disk in both cases. To be thorough, I also  
ran the test while writing the output file to /, and got the same  
results (5.7 MB/s on guest, 144 MB/s on host).


-Kurt
--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Avi Kivity

Kurt Yoder wrote:


I get 141 MB/s on the same test (it only copies to memory anyway).  
Is your VM under memory pressure?


I don't think so. The host has 128 GB of memory, and even the 
pre-emptive kernel caching doesn't come close to filling it:


m...@host:~$ free
 total   used   free sharedbuffers cached
Mem: 1320439601742480  130301480  0  84628 826040
-/+ buffers/cache: 831812  131212148
Swap:  1048568  01048568

My /tmp should be physical disk in both cases. To be thorough, I also 
ran the test while writing the output file to /, and got the same 
results (5.7 MB/s on guest, 144 MB/s on host).


Your guest is only assigned 256MB though.

--
error compiling committee.c: too many arguments to function

--


[PATCH] kvm-autotest: log test failed execption string

2009-03-25 Thread Ryan Harper
When a test case fails and throws an exception, we don't log the
exception details, only that it occurred.

reboot: DEBUG: remote_login: Got password prompt; sending '123456'
reboot: DEBUG: remote_login: Got shell prompt -- logged in
reboot: Logged in
reboot: DEBUG: run_once: Test failed; postprocessing on error...
reboot: DEBUG: postprocess_vm: Postprocessing VM 'vm1'...

The attached patch includes the exception text from the failure which makes it
easier to debug.

reboot: DEBUG: remote_login: Got password prompt; sending '123456'
reboot: DEBUG: remote_login: Got shell prompt -- logged in
reboot: Logged in
reboot: DEBUG: run_once: Test failed (invalid syntax (kvm_tests.py, line 34)); 
postprocessing on error...
reboot: DEBUG: postprocess_vm: Postprocessing VM 'vm1'...


-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
ry...@us.ibm.com


diffstat output:
 kvm_runtest_2.py |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

Signed-off-by: Ryan Harper ry...@us.ibm.com
---
diff --git a/client/tests/kvm_runtest_2/kvm_runtest_2.py 
b/client/tests/kvm_runtest_2/kvm_runtest_2.py
index 9add48a..cf6b699 100644
--- a/client/tests/kvm_runtest_2/kvm_runtest_2.py
+++ b/client/tests/kvm_runtest_2/kvm_runtest_2.py
@@ -91,8 +91,8 @@ class kvm_runtest_2(test.test):
 routine_obj.routine(self, params, env)
 env.sync()
 
-except:
-kvm_log.debug("Test failed; postprocessing on error...")
+except Exception, e:
+kvm_log.debug("Test failed (%s); postprocessing on error..." % (str(e)))
 kvm_preprocessing.postprocess_on_error(self, params, env)
 env.sync()
 raise
--


Re: KVM Port

2009-03-25 Thread kvm port
I used a common term; the RISC processor has virtualization extensions.
A guest mode is added alongside the user and kernel modes.



On Wed, Mar 25, 2009 at 9:45 PM, Avi Kivity a...@redhat.com wrote:
 kvm port wrote:

 Hi KVM Gurus,

 We have an EVB with an FPGA-based RISC processor with VT support.
 As a proof of concept I have to port KVM onto it; we have Linux running as of
 now.
 Can any of you help with how I should begin?

 -thanks n regards


 What's a EVB?

 What do you mean by a RISC processor with VT support?  VT is an x86
 instruction set.

 --
 error compiling committee.c: too many arguments to function


--


Re: KVM Port

2009-03-25 Thread kvm port
EVB = evaluation board.

On Wed, Mar 25, 2009 at 10:15 PM, kvm port kvmp...@gmail.com wrote:
 I used a common term; the RISC processor has virtualization extensions.
 A guest mode is added alongside the user and kernel modes.



 On Wed, Mar 25, 2009 at 9:45 PM, Avi Kivity a...@redhat.com wrote:
 kvm port wrote:

 Hi KVM Gurus,

 We have an EVB with an FPGA-based RISC processor with VT support.
 As a proof of concept I have to port KVM onto it; we have Linux running as of
 now.
 Can any of you help with how I should begin?

 -thanks n regards


 What's a EVB?

 What do you mean by a RISC processor with VT support?  VT is an x86
 instruction set.

 --
 error compiling committee.c: too many arguments to function



--


Re: Split kvm source tarballs

2009-03-25 Thread Christoph Hellwig
On Wed, Mar 25, 2009 at 08:44:58AM -0500, Anthony Liguori wrote:
 That's what I figured.  FWIW, the split tarballs work just fine for me.

 It may be worth waiting to do step 2 until the IO thread is merged.  I  
 think once that happens, we could probably do a sprint to get rid of  
 libkvm in kvm-userspace.  That would certainly simplify things.

Yeah.  And having both the common and split repos just confuses the
heck out of any user of the repository.  I think the right way to split
is to wait for libkvm to go away and just have a qemu-kvm repository
and an entirely separate kernel module repository.  It's not like there
is anything common but the few exported ABI headers, and we can either
keep them in both (would mean qemu-kvm can always build against a
defined set of headers) or make qemu-kvm require a kernel source like
the current kvm support in upstream qemu.

--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Kurt Yoder


On Mar 25, 2009, at 12:35 PM, Avi Kivity wrote:


Kurt Yoder wrote:


I get 141 MB/s on the same test (it only copies to memory  
anyway).  Is your VM under memory pressure?


I don't think so. The host has 128 GB of memory, and even the pre- 
emptive kernel caching doesn't come close to filling it:


m...@host:~$ free
total   used   free sharedbuffers  
cached
Mem: 1320439601742480  130301480  0  84628  
826040

-/+ buffers/cache: 831812  131212148
Swap:  1048568  01048568

My /tmp should be physical disk in both cases. To be thorough, I  
also ran the test while writing the output file to /, and got the  
same results (5.7 MB/s on guest, 144 MB/s on host).


Your guest is only assigned 256MB though.



I upped the memory on the VM to 1 GB and ran the same test. It  
actually got *slower*:


m...@guest:~$ free
 total   used   free sharedbuffers  
cached
Mem:   1024224 131324 892900  0   1816   
18328

-/+ buffers/cache: 80 913044
Swap:   488248  0 488248
m...@guest:~$ sudo -s
m...@guest:~# dd if=/dev/zero of=/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 22.2141 s, 2.3 MB/s

I ran this three times to make sure it wasn't a fluke, and got those  
same rates each time. The guest is now running thus:


root 19915 1  0 Mar24 ?00:00:00   /usr/sbin/libvirtd -d
nobody   19937 19915  0 Mar24 ?00:00:00 dnsmasq --keep-in- 
foreground --strict-order --bind-interfaces --pid-file  --conf-file  -- 
listen-address 192.168.122.1 --except-interface lo --dhcp-leasefile=/ 
var/lib/libvirt/dhcp-default.leases --dhcp-range  
192.168.122.2,192.168.122.254
root 25051 19915 54 12:40 ?00:05:05 /usr/bin/kvm -S -M  
pc -m 1024 -smp 1 -name guest -monitor pty -boot c -drive file=/dev/ 
HW_RAID/Guest,if=virtio,index=0,boot=on -net  
nic,macaddr=52:54:00:ba:e6:db,vlan=0,model=virtio -net  
tap,fd=11,script=,vlan=0,ifname=vnet1 -serial none -parallel none -usb  
-vnc 127.0.0.1:1

--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Avi Kivity

Kurt Yoder wrote:


I upped the memory on the VM to 1 GB and ran the same test. It 
actually got *slower*:


m...@guest:~$ free
 total   used   free sharedbuffers cached
Mem:   1024224 131324 892900  0   1816  18328
-/+ buffers/cache: 80 913044
Swap:   488248  0 488248
m...@guest:~$ sudo -s
m...@guest:~# dd if=/dev/zero of=/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 22.2141 s, 2.3 MB/s




Weird.  Can you change the disk interface to ide to rule out virtio issues?

(though dd should be running entirely in cache)

Is your disk a raw volume or qcow file?

--
error compiling committee.c: too many arguments to function

--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Kurt Yoder


On Mar 25, 2009, at 12:54 PM, Avi Kivity wrote:


Kurt Yoder wrote:


I upped the memory on the VM to 1 GB and ran the same test. It  
actually got *slower*:


m...@guest:~$ free
total   used   free sharedbuffers  
cached
Mem:   1024224 131324 892900  0   1816   
18328

-/+ buffers/cache: 80 913044
Swap:   488248  0 488248
m...@guest:~$ sudo -s
m...@guest:~# dd if=/dev/zero of=/bigfile count=10
10+0 records in
10+0 records out
5120 bytes (51 MB) copied, 22.2141 s, 2.3 MB/s




Weird.  Can you change the disk interface to ide to rule out virtio  
issues?


(though dd should be running entirely in cache)

Is your disk a raw volume or qcow file?




It was actually set as an ide disk before it was a virtio disk. I  
switched it to virtio to try to get rid of this problem, but no luck  
so far.


The disk lives on an LVM logical volume.

What do you mean about the cache? Is my test fundamentally flawed? I  
*thought* I was testing write speed on the disk...


-Kurt
--


[ kvm-Bugs-2351676 ] Guests hang periodically on Ubuntu-8.10

2009-03-25 Thread SourceForge.net
Bugs item #2351676, was opened at 2008-11-26 09:59
Message generated for change (Comment added) made by clesiuk
You can respond by visiting: 
https://sourceforge.net/tracker/?func=detail&atid=893831&aid=2351676&group_id=180599

Please note that this message will contain a full copy of the comment thread,
including the initial issue submission, for this request,
not just the latest update.
Category: None
Group: None
Status: Open
Resolution: None
Priority: 5
Private: No
Submitted By: Chris Jones (c_jones)
Assigned to: Nobody/Anonymous (nobody)
Summary: Guests hang periodically on Ubuntu-8.10

Initial Comment:
I'm seeing periodic hangs on my guests.  I've been unable so far to find a 
trigger - they always boot fine, but after anywhere from 10 minutes to 24 hours 
they eventually hang completely.

My setup:
  * AMD Athlon X2 4850e (2500 MHz dual core)
  * 4Gig memory
  * Ubuntu 8.10 server, 64-bit
  * KVMs tried:
: kvm-72 (shipped with ubuntu)
: kvm-79 (built myself, --patched-kernel option)
  * Kernels tried:
: 2.6.27.7 (kernel.org, self built)
: 2.6.27-7-server from Ubuntu 8.10 distribution

  In guests
  * Ubuntu 8.10 server, 64-bit (virtual machine install)
  * kernel 2.6.27-7-server from Ubuntu 8.10

I'm running the guests like:
  sudo /usr/local/bin/qemu-system-x86_64\
 -daemonize \
 -no-kvm-irqchip\
 -hda Imgs/ndev_root.img\
 -m 1024\
 -cdrom ISOs/ubuntu-8.10-server-amd64.iso   \
 -vnc :4\
 -net nic,macaddr=DE:AD:BE:EF:04:04,model=e1000 \
 -net tap,ifname=tap4,script=/home/chris/kvm/qemu-ifup.sh 

The problem does not happen if I use -no-kvm.

I've tried some other options that have no effect:
  -no-kvm-pit
  -no-acpi

The disk images are raw format.

When the guests hang, I cannot ping them, and the vnc console is hung.  The 
qemu monitor is still accessible, and the guests recover if I issue a 
system_reset command from the monitor.  However, often, the console will not 
take keyboard input after doing so.

When the guest is hung, kvm_stat shows all 0s for the counters:

efer_relo  exits  fpu_reloa  halt_exit  halt_wake  host_stat  hypercall
+insn_emul  insn_emul invlpg   io_exits  irq_exits  irq_windo  largepage
+mmio_exit  mmu_cache  mmu_flood  mmu_pde_z  mmu_pte_u  mmu_pte_w  mmu_recyc
+mmu_shado  nmi_windo   pf_fixed   pf_guest  remote_tl  request_i  signal_ex
+tlb_flush
  0  0  0  0  0  0  0
+0  0  0  0  0  0  0  0
+0  0  0  0  0  0  0  0
+0  0  0  0  0  0

gdb shows two threads - both waiting:

c(gdb) info threads
  2 Thread 0x414f1950 (LWP 422)  0x7f36f07a03e1 in sigtimedwait ()
   from /lib/libc.so.6
  1 Thread 0x7f36f1f306e0 (LWP 414)  0x7f36f084b482 in select ()
   from /lib/libc.so.6
(gdb) thread 1
[Switching to thread 1 (Thread 0x7f36f1f306e0 (LWP 414))]#0  0x7f36f084b482
+in select () from /lib/libc.so.6
(gdb) bt
#0  0x7f36f084b482 in select () from /lib/libc.so.6
#1  0x004094cb in main_loop_wait (timeout=0)
at /home/chris/pkgs/kvm/kvm-79/qemu/vl.c:4719
#2  0x0050a7ea in kvm_main_loop ()
at /home/chris/pkgs/kvm/kvm-79/qemu/qemu-kvm.c:619
#3  0x0040fafc in main (argc=value optimized out,
argv=0x79f41948) at /home/chris/pkgs/kvm/kvm-79/qemu/vl.c:4871
(gdb) thread 2
[Switching to thread 2 (Thread 0x414f1950 (LWP 422))]#0  0x7f36f07a03e1 in
+sigtimedwait () from /lib/libc.so.6
(gdb) bt
#0  0x7f36f07a03e1 in sigtimedwait () from /lib/libc.so.6
#1  0x0050a560 in kvm_main_loop_wait (env=0xc319e0, timeout=0)
at /home/chris/pkgs/kvm/kvm-79/qemu/qemu-kvm.c:284
#2  0x0050aaf7 in ap_main_loop (_env=value optimized out)
at /home/chris/pkgs/kvm/kvm-79/qemu/qemu-kvm.c:425
#3  0x7f36f11ba3ea in start_thread () from /lib/libpthread.so.0
#4  0x7f36f0852c6d in clone () from /lib/libc.so.6
#5  0x in ?? ()


Any clues to help me resolve this would be much appreciated.


--

Comment By: B. Cameron Lesiuk (clesiuk)
Date: 2009-03-25 10:35

Message:
I have a similar problem as the original poster. 

I've discovered a possible workaround: disable CPU frequency scaling in
the host:
# apt-get remove powernowd

I'm running with disabled frequency scaling and so far my system is
stable.

I set the host frequency manually: 
# cd /sys/devices/system/cpu/cpu0/cpufreq
# cat scaling_available_frequencies
 250 240 220 200 180 100 
# cat scaling_available_governors
 conservative ondemand userspace powersave performance 
# echo powersave > scaling_governor    (minimum frequency)
# echo performance  

Re: IO on guest is 20 times slower than host

2009-03-25 Thread Avi Kivity

Kurt Yoder wrote:


What do you mean about the cache? Is my test fundamentally flawed? I 
*thought* I was testing write speed on the disk...


'dd', without further arguments, will write to the page cache and let 
the kernel write the data back at a later time.  If you increase the 
block size (bs=1M count=1000) you should see much faster times on the 
host, I wouldn't be surprised to see 1GB/s.


If  you want to test disk speed, use of=/dev/blah oflag=direct.  Beware 
of destroying your data disk.
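
To make the distinction concrete, here is a small self-contained C test in the
spirit of that advice; it times the same ~50 MB write once through the page
cache and once with O_DIRECT (file name and sizes are only examples, and
O_DIRECT needs an aligned buffer and a filesystem that supports it):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define BLOCK (1 << 20)              /* 1 MiB per write */
#define COUNT 51                     /* ~50 MB total, like the dd example */

static double write_test(const char *path, int extra_flags)
{
        void *buf;
        struct timespec t0, t1;
        int i, fd;

        if (posix_memalign(&buf, 4096, BLOCK))   /* O_DIRECT wants alignment */
                return -1;
        memset(buf, 0, BLOCK);

        fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | extra_flags, 0644);
        if (fd < 0) { perror(path); free(buf); return -1; }

        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (i = 0; i < COUNT; i++)
                if (write(fd, buf, BLOCK) != BLOCK) { perror("write"); break; }
        fsync(fd);                               /* count the real flush too */
        clock_gettime(CLOCK_MONOTONIC, &t1);

        close(fd);
        free(buf);
        return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
        printf("buffered: %.2f s\n", write_test("/tmp/bigfile", 0));
        printf("O_DIRECT: %.2f s\n", write_test("/tmp/bigfile", O_DIRECT));
        return 0;
}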


Something weird is happening with your system.  If you extend the test, 
what does 'top' show?  On both guest and host.


--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

--


Re: Split kvm source tarballs

2009-03-25 Thread Avi Kivity

Christoph Hellwig wrote:

On Wed, Mar 25, 2009 at 08:44:58AM -0500, Anthony Liguori wrote:
  

That's what I figured.  FWIW, the split tarballs work just fine for me.

It may be worth waiting to do step 2 until the IO thread is merged.  I  
think once that happens, we could probably do a sprint to get rid of  
libkvm in kvm-userspace.  That would certainly simplify things.



Yeah.  And having both the common and split repos just confuses the
heck out of any user of the repository.  I think the right way to split
is to wait for libkvm to go away and just have a qemu-kvm repository
and an entirely separate kernel module repository.  It's not like there
is anything common but the few exported ABI headers, and we can either
keep them in both (would mean qemu-kvm can always build against a
defined set of headers) or make qemu-kvm require a kernel source like
the current kvm support in upstream qemu.
  


While I strongly dislike duplicating code under source control, I'm 
beginning to lean in this direction, since the situation is beginning to 
confuse me too.


So how about this:

- keep copies of the headers in the qemu repository. 'make sync' becomes 
a maintainer tool rather than a developer tool
- move qemu to the root of the repository, and reparent libkvm/ user/ 
and friends under it.  this will give us easier merging.

- move the external module kit into kvm.git

We end up with a standalone kvm-userspace.git which is easily mergable 
to and from qemu.git, and kvm.git which can build either a Linux kernel 
(and is easily mergable to and from Linus' tree and others) or an 
external module.


No git submodules or inter-repository dependencies.  What's not to like?

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

--


Re: Split kvm source tarballs

2009-03-25 Thread Christoph Hellwig
On Wed, Mar 25, 2009 at 08:02:48PM +0200, Avi Kivity wrote:
 So how about this:

 - keep copies of the headers in the qemu repository. 'make sync' becomes  
 a maintainer tool rather than a developer tool

Yeah.  That similar how we maintain the headers and some shared source
file for XFS and libxfs in xfsprogs.

 - move qemu to the root of the repository, and reparent libkvm/ user/  
 and friends under it.  this will give us easier merging.

Yeah.  While you're at it user/ might be renamed to something more
descriptive.

 - move the external module kit into kvm.git

As in your kvm development kernel tree?  Not sure it's a good idea
to suck this into a full kernel tree.  Probably worth making it a small
repository of its own.

--


Re: IO on guest is 20 times slower than host

2009-03-25 Thread Kurt Yoder


On Mar 25, 2009, at 1:55 PM, Avi Kivity wrote:


Kurt Yoder wrote:


What do you mean about the cache? Is my test fundamentally flawed?  
I *thought* I was testing write speed on the disk...


'dd', without further arguments, will write to the page cache and
let the kernel write the data back at a later time.  If you increase
the block size (bs=1M count=1000) you should see much faster times
on the host; I wouldn't be surprised to see 1 GB/s.


If  you want to test disk speed, use of=/dev/blah oflag=direct.   
Beware of destroying your data disk.
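
(For concreteness, a sketch of the two variants described above -- the
cached write with a larger block size, and a direct write that bypasses
the page cache.  The file path and sizes are only placeholders:)

  # cached write with larger blocks: mostly measures the page cache
  dd if=/dev/zero of=/tmp/ddtest bs=1M count=1000

  # direct write: bypasses the page cache and exercises the disk itself;
  # pointing of= at a block device instead would destroy its contents
  dd if=/dev/zero of=/tmp/ddtest bs=1M count=1000 oflag=direct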




I see. I looked up another test: using hdparm -t. It doesn't show the  
situation as quite so bad, but the guest is still a little over half  
the speed of the host:


m...@host:~$ sudo hdparm -t /dev/mapper/HW_RAID-ROOT

/dev/mapper/HW_RAID-ROOT:
 Timing buffered disk reads:  282 MB in  3.00 seconds =  93.92 MB/sec

m...@guest:~# hdparm -t /dev/vda

/dev/vda:
 Timing buffered disk reads:  156 MB in  3.03 seconds =  51.56 MB/sec



Something weird is happening with your system.  If you extend the  
test, what does 'top' show?  On both guest and host.
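
(One way to extend the test while keeping 'top' open on both sides might
be the following; the file path and sizes are placeholders:)

  # a longer direct write on the guest, so there is time to watch 'top'
  dd if=/dev/zero of=/bigfile bs=1M count=4096 oflag=direct
  # meanwhile, in separate terminals on both guest and host:
  top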


If I extend the test thusly on the guest:

dd if=/dev/zero of=/bigfile count=1000

I see 100% CPU utilization on the guest, and 100% CPU utilization on  
one of the host cores.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: kvm-84 and guests with more than 3536 MB Ram?

2009-03-25 Thread Lukas Kolbe
On Di, 2009-03-24 at 16:27 -0500, Ryan Harper wrote:

Hi Ryan (and all), the issue seems to be solved, see below.
 
 you can test kvm-84 bios with:
 
 % cd kvm-84/qemu
 % ./x86_64-softmmu/qemu-system-x86_64 -L pc-bios all your other options
 
 That will force qemu to look in the kvm-84 pc-bios dir for bios instead
 of /usr/local/share/qemu
 
 if *that* works, then you didn't make install kvm-84, and running with
 mismatched bios bins is a sure way to screw things up.

This didn't work (neither the self-compiled kvm-84 nor the
Debian-provided one), but using a newer 2.6.28 snapshot from Debian
(based on 2.6.28.8, whereas the previously used one was based on .4 IIRC)
we are able to boot into the 7 GB RAM guest again. Thanks for your help
(and patience)!


-- 
Lukas


--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: Split kvm source tarballs

2009-03-25 Thread Anthony Liguori

Hi Avi,

I spent some time today putting together an approximation of the KVM 
stable release based on QEMU 0.10.x.  In principle, it's not too bad at 
all because it's just a matter of creating a branch in kvm-userspace 
that's based on a kvm commit sometime after the QEMU 0.10.x release but 
before the next qemu-cvs merge you did that happened post QEMU 0.10.x.  
Basically, it was a merge of 72ee81f and dde.


The problem for me is pulling in the QEMU stable fixes.  You don't have 
a branch that tracks the QEMU stable tree, and I can't pull in the QEMU 
stable git tree without rewriting history since the directory layout is 
different.


But if you created a qemu-svn-stable branch that followed the QEMU 
stable tree in kvm-userspace, like the qemu-cvs branch follows trunk, 
then it would be pretty easy to create and maintain a kvm_stable_0_10 
branch, or whatever you'd like to call it, in kvm-userspace.


Any chance you could do this?  I suspect it's just a matter of creating 
the branch based off of the qemu-cvs tree at dde and then doing a 
git-svn fetch.
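
(Schematically, and only as a sketch of the request -- assuming an
svn-remote for QEMU's stable branch has already been configured in
.git/config, and with <commit> standing in for the qemu-cvs commit
abbreviated above:)

  # illustrative only: start the branch at the relevant qemu-cvs commit,
  # then let git-svn pull in the QEMU stable history from its svn-remote
  git branch qemu-svn-stable <commit>
  git svn fetch qemu-stable   # "qemu-stable" is a hypothetical svn-remote name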


Regards,

Anthony Liguori

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: KVM Port

2009-03-25 Thread Liu Yu-B13201

IMHO, one thing you should keep in mind is how to isolate the guest address
space based on your hardware MMU.
Then deal with the exceptions carefully:
some may be delivered directly to the guest, and some should be handled by the hypervisor.

In the powerpc BOOKE implementation, we have to hijack all exceptions,
because BOOKE doesn't support a technique like VT.


 -Original Message-
 From: kvm-ow...@vger.kernel.org 
 [mailto:kvm-ow...@vger.kernel.org] On Behalf Of kvm port
 Sent: Wednesday, March 25, 2009 11:08 PM
 To: kvm@vger.kernel.org; kvm-...@vger.kernel.org
 Subject: KVM Port
 
 Hi KVM Gurus,
 
 We have an EVB with an FPGA-based RISC processor with VT support.
 As a proof of concept I have to port KVM onto it; we have Linux running
 on it as of now.
 Can any of you help with how I should begin?
 
 
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Luvalley project: running KVM without Linux

2009-03-25 Thread Xiaodong Yi
Luvalley is a Virtual Machine Monitor (VMM) spawned from the KVM
project. Part of its source code is derived from KVM to virtualize
CPU instructions and the memory management unit (MMU). However, its
overall architecture is completely different from KVM, and somewhat
like Xen. Luvalley runs outside of Linux, just like Xen's
architecture, but it still uses Linux as its scheduler, memory
manager, physical device driver provider and virtual IO device
emulator. Moreover, Luvalley may run WITHOUT Linux. In theory, any
operating system could take the place of Linux to provide the above
services. Currently, Luvalley supports Linux and Windows. That is to
say, one may run Luvalley to boot a Linux or Windows, and then run
multiple virtualized operating systems on that Linux or Windows.

If you are interested in the Luvalley project, you may download the source
code from
 http://sourceforge.net/projects/luvalley/

The following gives more details about Luvalley.

Luvalley is an external hypervisor, just like Xen
(http://www.xen.org). It boots and controls the x86 machine before
starting up any operating system. However, Luvalley is much smaller
and simpler than Xen. Most of Xen's jobs, such as scheduling, memory
management, interrupt management, etc., are shifted to Linux (or any
other OS) running on top of Luvalley.

Luvalley is booted first, when the x86 machine is powered on. It boots
up all CPUs in an SMP system and enables their virtualization extensions.
Then the MBR (Master Boot Record) is read and executed in the CPU's
virtualization mode. In this way, a Linux (or any other OS) is
eventually booted. Luvalley assigns all physical memory, the
programmable interrupt controller (PIC) and IO devices to this
privileged OS. Following Xen, we call this OS the domain 0 (dom0)
OS.

As in KVM, a modified Qemu runs on dom0 Linux to provide virtual
IO devices for the other operating systems running on top of Luvalley. We
also follow Xen in calling these operating systems domain user (domU).
That is to say, there is exactly one dom0 OS and possibly several
domU OSes running on top of Luvalley. Each domU OS corresponds to a
Qemu process in the dom0 OS. The memory of a domU is allocated from dom0
by Qemu, and when Qemu is scheduled to run by the dom0 scheduler, it
calls Luvalley to run the corresponding domU.

Moreover, as Luvalley requires nothing from the dom0 Linux kernel,
other operating systems such as Windows, FreeBSD, etc. can also serve
as the dom0 OS, provided that Qemu can be ported to these operating
systems. Since Qemu is a userland application and is cross-platform,
such porting is feasible. Currently, we have added Luvalley support
to Qemu-0.10.0, which can be compiled and run on Windows. With the
help of Luvalley, Qemu-0.10.0 runs much faster because it can utilize
the VT support provided by the Intel CPU.

In summary, Luvalley inherits all the merits of KVM. In particular,
Luvalley is very small and simple. It is even easier to use than
KVM because it does not depend on a specific Linux kernel version. Any
version of Linux can serve as Luvalley's dom0 OS, unless Qemu
cannot run on it.

In addition, we think Luvalley's architecture meets the demands of both
the desktop and the server operating system areas:

1. In the desktop area, there are many kinds of operating systems
running on various hardware and devices. In theory, it is rather easy
to add virtualization capability to all kinds of operating systems
without sacrificing hardware compatibility or the user
experience. Moreover, Luvalley is very easy to install. It requires
only a boot loader which supports the Multiboot Specification, e.g., Grub,
WinGrub (http://sourceforge.net/projects/grub4dos), etc.

2. In the server area, especially for large-scale server systems (for
example, thousands of CPUs), a single Linux is not suitable for managing
the whole system, so KVM cannot be used properly there. Luvalley's
architecture is more suitable for such servers. For example, it can be
used to divide the physical resources into partitions and run a Linux
instance for each partition. In addition, Luvalley is very small and may
be put into the BIOS to serve as virtualization firmware.
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: Luvalley project: running KVM without Linux

2009-03-25 Thread Jun Koi
Very cool! Who is behind this project? Intel?

I will give it a try!

Thanks,
Jun

On Thu, Mar 26, 2009 at 12:20 PM, Xiaodong Yi xdong...@gmail.com wrote:
 Luvalley is a Virtual Machine Monitor (VMM) spawned from the KVM
 project. Its part of source codes are derived from KVM to virtualize
 CPU instructions and memory management unit (MMU). However, its
 overall architecture is completely different from KVM, but somewhat
 like Xen. Luvalley runs outside of Linux, just like Xen's
 architecture, but it still uses Linux as its scheduler, memory
 manager, physical device driver provider and virtual IO device
 emulator. Moreover, Luvalley may run WITHOUT Linux. In theory, any
 operating system could take the place of Linux to provide the above
 services. Currently, Luvalley supports Linux and Windows. That is to
 say, one may run Luvalley to boot a Linux or Windows, and then run
 multiple virtualized operating systems on such Linux or Windows.

 If you are interested in Luvalley project, you may download the source
 codes from
     http://sourceforge.net/projects/luvalley/

 The following is more details about Luvalley.

 Luvalley is an external hypervisor, just like Xen
 (http://www.xen.org). It boots and controls the X86 machine before
 starting up any operating system. However, Luvalley is much smaller
 and simpler than Xen. Most jobs of Xen, such as scheduling, memory
 management, interrupt management, etc, are shifted to Linux (or any
 other OS), which is running on top of Luvalley.

 Luvalley gets booted first when the X86 machine is power on. It boots
 up all CPUs in SMP system and enables their virtualization extensions.
 Then the MBR (Master Boot Record) is read out and executed in CPU's
 virtualization mode. Following this way, a Linux (or any other OS)
 will be booted up at last. Luvalley assigns all physical memory,
 programmable interrupt controller (PIC) and IO devices to this
 priviledged OS. Following Xen, we call this OS as domain 0 (dom0)
 OS.

 Like KVM, a modified Qemu is running on dom0 Linux to provide virtual
 IO devices for other operating systems running on top of Luvalley. We
 also follow Xen to call these operating systems domain user (domU).
 That is to say, there must be exact one dom0 OS and may be several
 domU OSs running on top of Luvalley. Each domU OS corresponds to a
 Qemu process in dom0 OS. The memory of domU is allocated from dom0 by
 Qemu. And when Qemu is scheduled to run by dom0 Scheduler, it will
 call Luvalley to run the corresponding domU.

 Moreover, as Luvalley requires nothing from the dom0 Linux kernel,
 other operating systems such as Windows, FreeBSD, etc can also serve
 as dom0 OS, provided that Qemu can be ported to these operating
 systems. Since Qemu is an userland application and is able to cross
 platform, such porting is feasible. Currently, we have added the
 Luvalley support into Qemu-0.10.0, which can be compiled and run in
 Windows. With the help of Luvalley, Qemu-0.10.0 runs much faster
 becuase it could utilize the VT support provided by Intel CPU.

 In summary, Luvalley inherited all merits from KVM. Especially,
 Luvalley is very small and simple. It is even more easy-to-use than
 KVM because it does not depend on specific Linux kernel version. Every
 version of Linux can serve as Luvalley's dom0 OS, except that Qemu
 cannot run on it.

 In addition, we think Luvalley's architecture meets the demand on both
 desktop and server operating system area:

 1. In the desktop area, there are many kinds of operating systems
 runing on various hardwares and devices. In theory, it is rather easy
 to add virtualization ability for all kinds of operating systems,
 without sacrificing the hardware compatibility and the user
 experience. Moreover, Luvalley is very easy to install. It requires
 only a boot loader which supports Multiboot Specification, e.g., Grub,
 WinGrub (http://sourceforge.net/projects/grub4dos), etc.

 2. In the server area, especially for large-scale server systems (for
 example, throusands of CPUs), a single Linux is not suitable to manage
 the whole system. Therefore, KVM cannot be used properly. Luvalley's
 architecture is more suitable for servers. For example, it can be used
 to divide physical resources to partitions, and run a Linux for each
 partition. In addition, Luvalley is very small and may be put into
 BIOS to serve as a virtulization firmware.
 --
 To unsubscribe from this list: send the line unsubscribe kvm in
 the body of a message to majord...@vger.kernel.org
 More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: Luvalley project: running KVM without Linux

2009-03-25 Thread Sheng Yang
On Thursday 26 March 2009 11:56:20 Jun Koi wrote:
 Very cool! Who is behind this project? Intel?

Not us...

According to
http://lists.centos.org/pipermail/centos-promo/2009-February/000402.html,
it is the Operating System Engineering Research Center in China.

-- 
regards
Yang, Sheng


 I will give it a try!

 Thanks,
 Jun

 On Thu, Mar 26, 2009 at 12:20 PM, Xiaodong Yi xdong...@gmail.com wrote:
  Luvalley is a Virtual Machine Monitor (VMM) spawned from the KVM
  project. Its part of source codes are derived from KVM to virtualize
  CPU instructions and memory management unit (MMU). However, its
  overall architecture is completely different from KVM, but somewhat
  like Xen. Luvalley runs outside of Linux, just like Xen's
  architecture, but it still uses Linux as its scheduler, memory
  manager, physical device driver provider and virtual IO device
  emulator. Moreover, Luvalley may run WITHOUT Linux. In theory, any
  operating system could take the place of Linux to provide the above
  services. Currently, Luvalley supports Linux and Windows. That is to
  say, one may run Luvalley to boot a Linux or Windows, and then run
  multiple virtualized operating systems on such Linux or Windows.
 
  If you are interested in Luvalley project, you may download the source
  codes from
      http://sourceforge.net/projects/luvalley/
 
  The following is more details about Luvalley.
 
  Luvalley is an external hypervisor, just like Xen
  (http://www.xen.org). It boots and controls the X86 machine before
  starting up any operating system. However, Luvalley is much smaller
  and simpler than Xen. Most jobs of Xen, such as scheduling, memory
  management, interrupt management, etc, are shifted to Linux (or any
  other OS), which is running on top of Luvalley.
 
  Luvalley gets booted first when the X86 machine is power on. It boots
  up all CPUs in SMP system and enables their virtualization extensions.
  Then the MBR (Master Boot Record) is read out and executed in CPU's
  virtualization mode. Following this way, a Linux (or any other OS)
  will be booted up at last. Luvalley assigns all physical memory,
  programmable interrupt controller (PIC) and IO devices to this
  priviledged OS. Following Xen, we call this OS as domain 0 (dom0)
  OS.
 
  Like KVM, a modified Qemu is running on dom0 Linux to provide virtual
  IO devices for other operating systems running on top of Luvalley. We
  also follow Xen to call these operating systems domain user (domU).
  That is to say, there must be exact one dom0 OS and may be several
  domU OSs running on top of Luvalley. Each domU OS corresponds to a
  Qemu process in dom0 OS. The memory of domU is allocated from dom0 by
  Qemu. And when Qemu is scheduled to run by dom0 Scheduler, it will
  call Luvalley to run the corresponding domU.
 
  Moreover, as Luvalley requires nothing from the dom0 Linux kernel,
  other operating systems such as Windows, FreeBSD, etc can also serve
  as dom0 OS, provided that Qemu can be ported to these operating
  systems. Since Qemu is an userland application and is able to cross
  platform, such porting is feasible. Currently, we have added the
  Luvalley support into Qemu-0.10.0, which can be compiled and run in
  Windows. With the help of Luvalley, Qemu-0.10.0 runs much faster
  becuase it could utilize the VT support provided by Intel CPU.
 
  In summary, Luvalley inherited all merits from KVM. Especially,
  Luvalley is very small and simple. It is even more easy-to-use than
  KVM because it does not depend on specific Linux kernel version. Every
  version of Linux can serve as Luvalley's dom0 OS, except that Qemu
  cannot run on it.
 
  In addition, we think Luvalley's architecture meets the demand on both
  desktop and server operating system area:
 
  1. In the desktop area, there are many kinds of operating systems
  runing on various hardwares and devices. In theory, it is rather easy
  to add virtualization ability for all kinds of operating systems,
  without sacrificing the hardware compatibility and the user
  experience. Moreover, Luvalley is very easy to install. It requires
  only a boot loader which supports Multiboot Specification, e.g., Grub,
  WinGrub (http://sourceforge.net/projects/grub4dos), etc.
 
  2. In the server area, especially for large-scale server systems (for
  example, throusands of CPUs), a single Linux is not suitable to manage
  the whole system. Therefore, KVM cannot be used properly. Luvalley's
  architecture is more suitable for servers. For example, it can be used
  to divide physical resources to partitions, and run a Linux for each
  partition. In addition, Luvalley is very small and may be put into
  BIOS to serve as a virtulization firmware.
  --
  To unsubscribe from this list: send the line unsubscribe kvm in
  the body of a message to majord...@vger.kernel.org
  More majordomo info at  http://vger.kernel.org/majordomo-info.html

 --
 To unsubscribe from this list: send the line unsubscribe kvm in
 the 

[no subject]

2009-03-25 Thread Bear Yang

subscribe kvm
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


KVM Port

2009-03-25 Thread kvm port
Hi KVM Gurus,

We have an EVB with an FPGA-based RISC processor with VT support.
As a proof of concept I have to port KVM onto it; we have Linux running on it as of now.
Can any of you help with how I should begin?

-thanks n regards
kvport bunch
--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html