[PATCH] Activate Virtualization On Demand

2009-09-15 Thread Alexander Graf
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means that, instead,
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

Signed-off-by: Alexander Graf ag...@suse.de

--

I've tested the following:

  - shutdown
  - suspend / resume to RAM
  - suspend / resume to DISK
  - running VirtualBox while kvm module is loaded

v1 - v2

  - move failure disable into locked section
  - move vmx's ept_sync_global from init to hardware_enable
---
 arch/ia64/kvm/kvm-ia64.c|8 ++-
 arch/powerpc/kvm/powerpc.c  |3 +-
 arch/s390/kvm/kvm-s390.c|3 +-
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/svm.c  |   13 --
 arch/x86/kvm/vmx.c  |   11 +++-
 arch/x86/kvm/x86.c  |4 +-
 include/linux/kvm_host.h|2 +-
 virt/kvm/kvm_main.c |   90 +-
 9 files changed, 108 insertions(+), 28 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f6471c8..5fdeec5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
*opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
	if (slot < 0)
-   return;
+   return -EINVAL;
 
spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
		printk(KERN_WARNING "kvm: Failed to Enable VT Support\n");
-   return ;
+   return -EINVAL;
}
 
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 95af622..5902bbc 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu 
*vcpu)
return r;
 }
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 00e2ce8..5445058 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static unsigned long long *facilities;
 
 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
/* every s390 is virtualization enabled ;-) */
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6046e6f..b17886f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -462,7 +462,7 @@ struct descriptor_table {
 struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a5f90c7..2f3a388 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
 }
 
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
 {
 
struct svm_cpu_data *svm_data;
@@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
 
+   rdmsrl(MSR_EFER, efer);
+   

Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Marcelo Tosatti
On Wed, Sep 09, 2009 at 04:18:58PM +0200, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).
 
 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.
 
 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on destruction of the last one.
 
 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.
 
 Signed-off-by: Alexander Graf ag...@suse.de
 
 --
 
 I've tested the following:
 
   - shutdown
   - suspend / resume to RAM
   - running VirtualBox while kvm module is loaded
 ---
  arch/ia64/kvm/kvm-ia64.c|8 ++-
  arch/powerpc/kvm/powerpc.c  |3 +-
  arch/s390/kvm/kvm-s390.c|3 +-
  arch/x86/include/asm/kvm_host.h |2 +-
  arch/x86/kvm/svm.c  |   13 --
  arch/x86/kvm/vmx.c  |7 +++-
  arch/x86/kvm/x86.c  |4 +-
  include/linux/kvm_host.h|2 +-
  virt/kvm/kvm_main.c |   82 
 +--
  9 files changed, 98 insertions(+), 26 deletions(-)
 
 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
 index f6471c8..5fdeec5 100644
 --- a/arch/ia64/kvm/kvm-ia64.c
 +++ b/arch/ia64/kvm/kvm-ia64.c
 @@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
 *opt_handler)
  
  static  DEFINE_SPINLOCK(vp_lock);
  
 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
  {
   long  status;
   long  tmp_base;
 @@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
   slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
   local_irq_restore(saved_psr);
   if (slot  0)
 - return;
 + return -EINVAL;
  
   spin_lock(vp_lock);
   status = ia64_pal_vp_init_env(kvm_vsa_base ?
 @@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
   __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
   if (status != 0) {
   printk(KERN_WARNINGkvm: Failed to Enable VT Support\n);
 - return ;
 + return -EINVAL;
   }
  
   if (!kvm_vsa_base) {
 @@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
   }
   spin_unlock(vp_lock);
   ia64_ptr_entry(0x3, slot);
 +
 + return 0;
  }
  
  void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
 index 95af622..5902bbc 100644
 --- a/arch/powerpc/kvm/powerpc.c
 +++ b/arch/powerpc/kvm/powerpc.c
 @@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct 
 kvm_vcpu *vcpu)
   return r;
  }
  
 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
  {
 + return 0;
  }
  
  void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
 index 00e2ce8..5445058 100644
 --- a/arch/s390/kvm/kvm-s390.c
 +++ b/arch/s390/kvm/kvm-s390.c
 @@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
  static unsigned long long *facilities;
  
  /* Section: not file related */
 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
  {
   /* every s390 is virtualization enabled ;-) */
 + return 0;
  }
  
  void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
 index 6046e6f..b17886f 100644
 --- a/arch/x86/include/asm/kvm_host.h
 +++ b/arch/x86/include/asm/kvm_host.h
 @@ -462,7 +462,7 @@ struct descriptor_table {
  struct kvm_x86_ops {
   int (*cpu_has_kvm_support)(void);  /* __init */
   int (*disabled_by_bios)(void); /* __init */
 - void (*hardware_enable)(void *dummy);  /* __init */
 + int (*hardware_enable)(void *dummy);
   void (*hardware_disable)(void *dummy);
   void (*check_processor_compatibility)(void *rtn);
   int (*hardware_setup)(void);   /* __init */
 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
 index a5f90c7..2f3a388 100644
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
 @@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
   cpu_svm_disable();
  }
  
 -static void svm_hardware_enable(void *garbage)
 +static int svm_hardware_enable(void *garbage)
  {
  
   struct svm_cpu_data *svm_data;
 @@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
   struct desc_struct *gdt;
   int me = raw_smp_processor_id();
  
 + rdmsrl(MSR_EFER, efer);
 + if 

Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Marcelo Tosatti
On Mon, Sep 14, 2009 at 05:52:48PM +0200, Alexander Graf wrote:

 On 14.09.2009, at 15:23, Marcelo Tosatti wrote:

 On Wed, Sep 09, 2009 at 04:18:58PM +0200, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the  
 virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid  
 TLB
 entries (svm).

 Currently KVM activates virtualization when the respective kernel  
 module
 is loaded. This blocks us from autoloading KVM modules without  
 breaking
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on destruction of the last one.

 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.

 Signed-off-by: Alexander Graf ag...@suse.de

 --

 I've tested the following:

  - shutdown
  - suspend / resume to RAM
  - running VirtualBox while kvm module is loaded
 ---
 arch/ia64/kvm/kvm-ia64.c|8 ++-
 arch/powerpc/kvm/powerpc.c  |3 +-
 arch/s390/kvm/kvm-s390.c|3 +-
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/svm.c  |   13 --
 arch/x86/kvm/vmx.c  |7 +++-
 arch/x86/kvm/x86.c  |4 +-
 include/linux/kvm_host.h|2 +-
 virt/kvm/kvm_main.c |   82 + 
 --
 9 files changed, 98 insertions(+), 26 deletions(-)

 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
 index f6471c8..5fdeec5 100644
 --- a/arch/ia64/kvm/kvm-ia64.c
 +++ b/arch/ia64/kvm/kvm-ia64.c
 @@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, 
 u64 *opt_handler)

 static  DEFINE_SPINLOCK(vp_lock);

 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
 {
 long  status;
 long  tmp_base;
 @@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
 local_irq_restore(saved_psr);
 if (slot  0)
 -   return;
 +   return -EINVAL;

 spin_lock(vp_lock);
 status = ia64_pal_vp_init_env(kvm_vsa_base ?
 @@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
 if (status != 0) {
 printk(KERN_WARNINGkvm: Failed to Enable VT Support\n);
 -   return ;
 +   return -EINVAL;
 }

 if (!kvm_vsa_base) {
 @@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
 }
 spin_unlock(vp_lock);
 ia64_ptr_entry(0x3, slot);
 +
 +   return 0;
 }

 void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
 index 95af622..5902bbc 100644
 --- a/arch/powerpc/kvm/powerpc.c
 +++ b/arch/powerpc/kvm/powerpc.c
 @@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run,  
 struct kvm_vcpu *vcpu)
 return r;
 }

 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
 {
 +   return 0;
 }

 void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
 index 00e2ce8..5445058 100644
 --- a/arch/s390/kvm/kvm-s390.c
 +++ b/arch/s390/kvm/kvm-s390.c
 @@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = 
 {
 static unsigned long long *facilities;

 /* Section: not file related */
 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
 {
 /* every s390 is virtualization enabled ;-) */
 +   return 0;
 }

 void kvm_arch_hardware_disable(void *garbage)
 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/ 
 kvm_host.h
 index 6046e6f..b17886f 100644
 --- a/arch/x86/include/asm/kvm_host.h
 +++ b/arch/x86/include/asm/kvm_host.h
 @@ -462,7 +462,7 @@ struct descriptor_table {
 struct kvm_x86_ops {
 int (*cpu_has_kvm_support)(void);  /* __init */
 int (*disabled_by_bios)(void); /* __init */
 -   void (*hardware_enable)(void *dummy);  /* __init */
 +   int (*hardware_enable)(void *dummy);
 void (*hardware_disable)(void *dummy);
 void (*check_processor_compatibility)(void *rtn);
 int (*hardware_setup)(void);   /* __init */
 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
 index a5f90c7..2f3a388 100644
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
 @@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
 cpu_svm_disable();
 }

 -static void svm_hardware_enable(void *garbage)
 +static int svm_hardware_enable(void *garbage)
 {

 struct svm_cpu_data *svm_data;
 @@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
 struct desc_struct *gdt;
 int me = raw_smp_processor_id();

 +   rdmsrl(MSR_EFER, efer);
 +   if 

Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Alexander Graf


On 14.09.2009, at 18:14, Marcelo Tosatti wrote:


On Mon, Sep 14, 2009 at 05:52:48PM +0200, Alexander Graf wrote:


On 14.09.2009, at 15:23, Marcelo Tosatti wrote:


On Wed, Sep 09, 2009 at 04:18:58PM +0200, Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the
virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid
TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel
module
is loaded. This blocks us from autoloading KVM modules without
breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

Signed-off-by: Alexander Graf ag...@suse.de

--

I've tested the following:

- shutdown
- suspend / resume to RAM
- running VirtualBox while kvm module is loaded
---
arch/ia64/kvm/kvm-ia64.c|8 ++-
arch/powerpc/kvm/powerpc.c  |3 +-
arch/s390/kvm/kvm-s390.c|3 +-
arch/x86/include/asm/kvm_host.h |2 +-
arch/x86/kvm/svm.c  |   13 --
arch/x86/kvm/vmx.c  |7 +++-
arch/x86/kvm/x86.c  |4 +-
include/linux/kvm_host.h|2 +-
virt/kvm/kvm_main.c |   82 +++ 
++

--
9 files changed, 98 insertions(+), 26 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f6471c8..5fdeec5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64  
*host_iva,

u64 *opt_handler)

static  DEFINE_SPINLOCK(vp_lock);

-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -EINVAL;

spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
printk(KERN_WARNINGkvm: Failed to Enable VT Support\n);
-   return ;
+   return -EINVAL;
}

if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/ 
powerpc.c

index 95af622..5902bbc 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run,
struct kvm_vcpu *vcpu)
return r;
}

-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 00e2ce8..5445058 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item  
debugfs_entries[] =

{
static unsigned long long *facilities;

/* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
/* every s390 is virtualization enabled ;-) */
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/ 
asm/

kvm_host.h
index 6046e6f..b17886f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -462,7 +462,7 @@ struct descriptor_table {
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a5f90c7..2f3a388 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
}

-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
{

struct svm_cpu_data *svm_data;
@@ -325,16 +325,20 @@ static void svm_hardware_enable(void  
*garbage)

struct desc_struct *gdt;
int me 

Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Marcelo Tosatti
On Mon, Sep 14, 2009 at 06:25:20PM +0200, Alexander Graf wrote:
 having succeeded. The hardware_enable_all caller calls
 hardware_disable_all (kvm_usage_count--) when enabling fails.

 But it does not hold any lock in between hardware_enable_all
 and hardware_disable_all.

 So it's unsafe if another kvm_create_vm call happens in between, while
 kvm_usage_count is > 1 ?

 So what we really need is a lock, so hardware_enable_all doesn't get  
 called twice? Isn't that what the kvm_lock here does?

Either that or check hardware_enable_failed atomic variable even 
if kvm_usage_count > 1.

 Also, better move vmx.c's ept_sync_global from vmx_init to
 hardware_enable.

 Why? What does that do?

 25.3.3.4 Guidelines for Use of the INVEPT Instruction

 Software can use the INVEPT instruction with the “all-context”
 INVEPT type immediately after execution of the VMXON instruction or
 immediately prior to execution of the VMXOFF instruction. Either
 prevents potentially undesired retention of information cached from  
 EPT
 paging structures between separate uses of VMX operation.

 Hmhm. I don't have EPT hardware to test things on, but I can of course  
 make a blind move of the call.

OK, i can do some basic testing before applying the patch.


 Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Alexander Graf


On 14.09.2009, at 18:46, Marcelo Tosatti wrote:


On Mon, Sep 14, 2009 at 06:25:20PM +0200, Alexander Graf wrote:

having succeeded. The hardware_enable_all caller calls
hardware_disable_all (kvm_usage_count--) when enabling fails.


But it does not hold any lock in between hardware_enable_all
and hardware_disable_all.

So its unsafe if another kvm_create_vm call happens in between,  
while

kvm_usage_count is 1 ?


So what we really need is a lock, so hardware_enable_all doesn't get
called twice? Isn't that what the kvm_lock here does?


Either that or check hardware_enable_failed atomic variable even
if kvm_usage_count  1.


The patch does a lock already.


Also, better move vmx.c's ept_sync_global from vmx_init to
hardware_enable.


Why? What does that do?


25.3.3.4 Guidelines for Use of the INVEPT Instruction

Software can use the INVEPT instruction with the “all-context”
INVEPT type immediately after execution of the VMXON instruction or
immediately prior to execution of the VMXOFF instruction. Either
prevents potentially undesired retention of information cached from
EPT
paging structures between separate uses of VMX operation.


Hmhm. I don't have EPT hardware to test things on, but I can of  
course

make a blind move of the call.


OK, i can do some basic testing before applying the patch.


Great :-)

Alex

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand

2009-09-14 Thread Alexander Graf


On 14.09.2009, at 15:23, Marcelo Tosatti wrote:


On Wed, Sep 09, 2009 at 04:18:58PM +0200, Alexander Graf wrote:
X86 CPUs need to have some magic happening to enable the  
virtualization

extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid  
TLB

entries (svm).

Currently KVM activates virtualization when the respective kernel  
module
is loaded. This blocks us from autoloading KVM modules without  
breaking

other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

Signed-off-by: Alexander Graf ag...@suse.de

--

I've tested the following:

 - shutdown
 - suspend / resume to RAM
 - running VirtualBox while kvm module is loaded
---
arch/ia64/kvm/kvm-ia64.c|8 ++-
arch/powerpc/kvm/powerpc.c  |3 +-
arch/s390/kvm/kvm-s390.c|3 +-
arch/x86/include/asm/kvm_host.h |2 +-
arch/x86/kvm/svm.c  |   13 --
arch/x86/kvm/vmx.c  |7 +++-
arch/x86/kvm/x86.c  |4 +-
include/linux/kvm_host.h|2 +-
virt/kvm/kvm_main.c |   82 + 
--

9 files changed, 98 insertions(+), 26 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f6471c8..5fdeec5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64  
*host_iva, u64 *opt_handler)


static  DEFINE_SPINLOCK(vp_lock);

-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -EINVAL;

spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
printk(KERN_WARNINGkvm: Failed to Enable VT Support\n);
-   return ;
+   return -EINVAL;
}

if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 95af622..5902bbc 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run,  
struct kvm_vcpu *vcpu)

return r;
}

-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 00e2ce8..5445058 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[]  
= {

static unsigned long long *facilities;

/* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
/* every s390 is virtualization enabled ;-) */
+   return 0;
}

void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/ 
kvm_host.h

index 6046e6f..b17886f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -462,7 +462,7 @@ struct descriptor_table {
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a5f90c7..2f3a388 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
}

-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
{

struct svm_cpu_data *svm_data;
@@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();

+   rdmsrl(MSR_EFER, efer);
+   if (efer & EFER_SVME)
+   

Re: [PATCH] Activate Virtualization On Demand

2009-09-13 Thread Avi Kivity

On 09/09/2009 05:18 PM, Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.
   


Looks good.

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Activate Virtualization On Demand

2009-09-09 Thread Alexander Graf
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

Signed-off-by: Alexander Graf ag...@suse.de

--

I've tested the following:

  - shutdown
  - suspend / resume to RAM
  - running VirtualBox while kvm module is loaded
---
 arch/ia64/kvm/kvm-ia64.c|8 ++-
 arch/powerpc/kvm/powerpc.c  |3 +-
 arch/s390/kvm/kvm-s390.c|3 +-
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/svm.c  |   13 --
 arch/x86/kvm/vmx.c  |7 +++-
 arch/x86/kvm/x86.c  |4 +-
 include/linux/kvm_host.h|2 +-
 virt/kvm/kvm_main.c |   82 +--
 9 files changed, 98 insertions(+), 26 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f6471c8..5fdeec5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
*opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -EINVAL;
 
spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
printk(KERN_WARNINGkvm: Failed to Enable VT Support\n);
-   return ;
+   return -EINVAL;
}
 
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 95af622..5902bbc 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu 
*vcpu)
return r;
 }
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 00e2ce8..5445058 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static unsigned long long *facilities;
 
 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
/* every s390 is virtualization enabled ;-) */
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6046e6f..b17886f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -462,7 +462,7 @@ struct descriptor_table {
 struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a5f90c7..2f3a388 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
 }
 
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
 {
 
struct svm_cpu_data *svm_data;
@@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
 
+   rdmsrl(MSR_EFER, efer);
+   if (efer & EFER_SVME)
+   return -EBUSY;
+
if (!has_svm()) {
printk(KERN_ERR svm_cpu_init: err 

Re: [PATCH] Activate Virtualization On Demand v2

2009-06-18 Thread Avi Kivity

On 06/18/2009 12:56 AM, Alexander Graf wrote:
I can test suspend/resume for you if you don't have a friendly 
machine.  I have a personal interest in keeping it working :)



Thinking about it again - there's only the atomic dec_and_test vs. 
read thing and the suspend test missing.


Is the atomic operation as is really that confusing? 


Yes.  It says, something tricky is going on, see if you can find it.

If not, we can keep the patch as is and you simply try s2ram on your 
notebook :-). I'm pretty sure it works - it used to.


It looks like it will work, but these things are tricky.  I'll test an 
updated patch.  Please also test reboot on Intel with a VM spinning and 
with no VMs loaded - Intel reboots are tricky too.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-17 Thread Alexander Graf


On 16.06.2009, at 17:13, Avi Kivity wrote:


On 06/16/2009 05:08 PM, Alexander Graf wrote:

Please tell me you tested suspend/resume with/without VMs and cpu
hotunplug/hotplug.



I tested cpu hotplugging. On the last round I tested suspend/ 
resume, but

this time I couldn't because my machine can't do suspend :-(.
So I'll try hard and find a machine I can test it on for the next  
round.




I can test suspend/resume for you if you don't have a friendly  
machine.  I have a personal interest in keeping it working :)


Thinking about it again - there's only the atomic dec_and_test vs.  
read thing and the suspend test missing.


Is the atomic operation as is really that confusing? If not, we can  
keep the patch as is and you simply try s2ram on your notebook :-).  
I'm pretty sure it works - it used to.


Alex

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-16 Thread Avi Kivity

On 06/15/2009 02:30 PM, Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

+static int hardware_enable_all(void)
+{
+   int r = 0;
+
+   spin_lock(&kvm_lock);
+
+   kvm_usage_count++;
+   if (kvm_usage_count == 1) {
+   atomic_set(&hardware_enable_failed, 1);
+   on_each_cpu(hardware_enable, NULL, 1);
+
+   if (!atomic_dec_and_test(&hardware_enable_failed))
+   r = -EBUSY;
+   }
   


That's a little obfuscated.  I suggest atomic_set(..., p) and 
atomic_read(...).



+
  static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
   void *v)
  {
int cpu = (long)v;

+   if (!kvm_usage_count)
+   return NOTIFY_OK;
+
	val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
@@ -2513,13 +2571,15 @@ static void kvm_exit_debug(void)

  static int kvm_suspend(struct sys_device *dev, pm_message_t state)
  {
-   hardware_disable(NULL);
+   if (kvm_usage_count)
+   hardware_disable(NULL);
return 0;
  }

  static int kvm_resume(struct sys_device *dev)
  {
-   hardware_enable(NULL);
+   if (kvm_usage_count)
+   hardware_enable(NULL);
return 0;
  }

   


Please tell me you tested suspend/resume with/without VMs and cpu 
hotunplug/hotplug.



--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-16 Thread Avi Kivity

On 06/15/2009 03:17 PM, Christoph Hellwig wrote:

On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
   

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.
 


That will only become interesting if we ever have such a thing in
mainline.  So NACK, lots of complication for no good reason.
   


If it were truly lots of complication, I might agree.  But it isn't, and 
we keep getting reports from users about it.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-16 Thread Alexander Graf
Avi Kivity wrote:
 On 06/15/2009 02:30 PM, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).

 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on destruction of the last one.

 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.

 +static int hardware_enable_all(void)
 +{
 +int r = 0;
 +
 +spin_lock(kvm_lock);
 +
 +kvm_usage_count++;
 +if (kvm_usage_count == 1) {
 +atomic_set(hardware_enable_failed, 1);
 +on_each_cpu(hardware_enable, NULL, 1);
 +
 +if (!atomic_dec_and_test(hardware_enable_failed))
 +r = -EBUSY;
 +}


 That's a little obfuscated.  I suggest atomic_set(..., p) and
 atomic_read(...).

Ah, I was more searching for an atomic_test :-).

   static int kvm_cpu_hotplug(struct notifier_block *notifier,
 unsigned long val,
  void *v)
   {
   int cpu = (long)v;

 +if (!kvm_usage_count)
 +return NOTIFY_OK;
 +
   val &= ~CPU_TASKS_FROZEN;
   switch (val) {
   case CPU_DYING:
 @@ -2513,13 +2571,15 @@ static void kvm_exit_debug(void)

   static int kvm_suspend(struct sys_device *dev, pm_message_t state)
   {
 -hardware_disable(NULL);
 +if (kvm_usage_count)
 +hardware_disable(NULL);
   return 0;
   }

   static int kvm_resume(struct sys_device *dev)
   {
 -hardware_enable(NULL);
 +if (kvm_usage_count)
 +hardware_enable(NULL);
   return 0;
   }


 +

 Please tell me you tested suspend/resume with/without VMs and cpu
 hotunplug/hotplug.

I tested cpu hotplugging. On the last round I tested suspend/resume, but
this time I couldn't because my machine can't do suspend :-(.
So I'll try hard and find a machine I can test it on for the next round.

Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-16 Thread Avi Kivity

On 06/16/2009 05:08 PM, Alexander Graf wrote:

Please tell me you tested suspend/resume with/without VMs and cpu
hotunplug/hotplug.
 


I tested cpu hotplugging. On the last round I tested suspend/resume, but
this time I couldn't because my machine can't do suspend :-(.
So I'll try hard and find a machine I can test it on for the next round.
   


I can test suspend/resume for you if you don't have a friendly machine.  
I have a personal interest in keeping it working :)


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Activate Virtualization On Demand v2

2009-06-15 Thread Alexander Graf
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

---

v2 uses kvm_lock and traces failures atomically

Signed-off-by: Alexander Graf ag...@suse.de
---
 arch/ia64/kvm/kvm-ia64.c|8 ++-
 arch/powerpc/kvm/powerpc.c  |2 +-
 arch/s390/kvm/kvm-s390.c|2 +-
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/svm.c  |   13 --
 arch/x86/kvm/vmx.c  |7 +++-
 arch/x86/kvm/x86.c  |4 +-
 include/linux/kvm_host.h|2 +-
 virt/kvm/kvm_main.c |   82 +--
 9 files changed, 96 insertions(+), 26 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 906d597..3141a92 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
*opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -EINVAL;
 
spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
	printk(KERN_WARNING "kvm: Failed to Enable VT Support\n");
-   return ;
+   return -EINVAL;
}
 
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9057335..6558ab7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -80,7 +80,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu 
*vcpu)
return r;
 }
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
+   return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index cbfe91e..a14e676 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,7 +70,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 
 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
/* every s390 is virtualization enabled ;-) */
+   return 0;
 }
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4627627..72d5075 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -463,7 +463,7 @@ struct descriptor_table {
 struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 04ee964..47a8b94 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -245,7 +245,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
 }
 
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
 {
 
struct svm_cpu_data *svm_data;
@@ -254,16 +254,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
 
+   rdmsrl(MSR_EFER, efer);
+   if (efer & EFER_SVME)
+   return -EBUSY;
+
if (!has_svm()) {
	printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
-   return;
+   return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
 
if (!svm_data) {
printk(KERN_ERR svm_cpu_init: svm_data 

Re: [PATCH] Activate Virtualization On Demand v2

2009-06-15 Thread Christoph Hellwig
On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).
 
 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.

That will only become interesting if we ever have such a thing in
mainline.  So NACK, lots of complication for no good reason.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-15 Thread Alexander Graf


On 15.06.2009, at 14:17, Christoph Hellwig wrote:


On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
X86 CPUs need to have some magic happening to enable the  
virtualization

extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid  
TLB

entries (svm).

Currently KVM activates virtualization when the respective kernel  
module
is loaded. This blocks us from autoloading KVM modules without  
breaking

other VMMs.


That will only become interesting if we ever have such a thing in
mainline.  So NACK, lots of complication for no good reason.



I don't want to fight political battles here. Seriously - we're out of  
kindergarden.
There are users out there who want to have VBox/VMware and kvm  
installed in parallel and can't have both kernel modules loaded at the  
same time. We're only hurting _our_ users, not the others if we keep  
people from having kvm*.ko loaded.


Sigh.

Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2009-06-15 Thread Christoph Hellwig
On Mon, Jun 15, 2009 at 02:25:01PM +0200, Alexander Graf wrote:
 I don't want to fight political battles here.


So stop that crap.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Activate Virtualization On Demand

2009-03-17 Thread Alexander Graf
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

Signed-off-by: Alexander Graf ag...@suse.de
---
 arch/ia64/kvm/kvm-ia64.c|8 +++--
 arch/powerpc/kvm/powerpc.c  |2 +-
 arch/s390/kvm/kvm-s390.c|2 +-
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/svm.c  |   13 +--
 arch/x86/kvm/vmx.c  |7 +++-
 arch/x86/kvm/x86.c  |4 +-
 include/linux/kvm_host.h|2 +-
 virt/kvm/kvm_main.c |   72 +--
 9 files changed, 87 insertions(+), 25 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c25347f..df6bab1 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
*opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
long  status;
long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -EINVAL;
 
spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
	printk(KERN_WARNING "kvm: Failed to Enable VT Support\n");
-   return ;
+   return -EINVAL;
}
 
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9057335..6558ab7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -80,7 +80,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu 
*vcpu)
return r;
 }
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index cbfe91e..a14e676 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,7 +70,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 
 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
/* every s390 is virtualization enabled ;-) */
 }
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4627627..72d5075 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -463,7 +463,7 @@ struct descriptor_table {
 struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void);  /* __init */
int (*disabled_by_bios)(void); /* __init */
-   void (*hardware_enable)(void *dummy);  /* __init */
+   int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void);   /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1821c20..06aeb7f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -262,7 +262,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
 }
 
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
 {
 
struct svm_cpu_data *svm_data;
@@ -271,16 +271,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
 
+   rdmsrl(MSR_EFER, efer);
+   if (efer & EFER_SVME)
+   return -EBUSY;
+
if (!has_svm()) {
	printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
-   return;
+   return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
 
if (!svm_data) {
printk(KERN_ERR svm_cpu_init: svm_data is NULL on %d\n,
   me);
-   return;
+   

Re: [PATCH] Activate Virtualization On Demand

2009-03-17 Thread Avi Kivity

Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.
  



diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 68b217e..7c40743 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -65,6 +65,8 @@ DEFINE_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;

+static int kvm_usage_count = 0;
+static DEFINE_SPINLOCK(kvm_usage_lock);
  


Please use kvm_lock for this.

 
@@ -2327,14 +2341,40 @@ static struct miscdevice kvm_dev = {

kvm_chardev_ops,
 };
 
-static void hardware_enable(void *junk)

+static void hardware_enable(void *_r)
 {
int cpu = raw_smp_processor_id();
+   int r;
+
+   /* If enabling a previous CPU failed already, let's not continue */
+   if (_r && *((int*)_r))
+   return;
  
 
 	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))

return;
+   r = kvm_arch_hardware_enable(NULL);
+   if (_r)
+   *((int*)_r) = r;
  


Racy.  If one cpu succeeds and another fails, the successful one could 
overwrite the failing one's result.


While the race will never happen (start two VMMs simultaneously) it will 
cause an endless stream of complaints.  Let's use an atomic_t 
incremented on each failure.


Oh, and it can be global since we're inside a lock, so some of the 
changes to add a return value become unnecessary.


 
+static void hardware_disable_all(void)

+{
+   if (!kvm_usage_count)
+   return;
  


Can this happen?


+
+   spin_lock(kvm_usage_lock);
+   kvm_usage_count--;
+   if (!kvm_usage_count)
+   on_each_cpu(hardware_disable, NULL, 1);
+   spin_unlock(kvm_usage_lock);
+}
+
  


Please make sure cpu hotplug/hotunplug (and this suspend/resume) still work.

--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand

2009-03-17 Thread Alexander Graf


On 17.03.2009, at 13:04, Avi Kivity a...@redhat.com wrote:


Alexander Graf wrote:
X86 CPUs need to have some magic happening to enable the  
virtualization

extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid  
TLB

entries (svm).

Currently KVM activates virtualization when the respective kernel  
module
is loaded. This blocks us from autoloading KVM modules without  
breaking

other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.




diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 68b217e..7c40743 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -65,6 +65,8 @@ DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
 static cpumask_var_t cpus_hardware_enabled;
+static int kvm_usage_count = 0;
+static DEFINE_SPINLOCK(kvm_usage_lock);



Please use kvm_lock for this.


Looks good. Will do :-).





@@ -2327,14 +2341,40 @@ static struct miscdevice kvm_dev = {
   kvm_chardev_ops,
};
-static void hardware_enable(void *junk)
+static void hardware_enable(void *_r)
{
   int cpu = raw_smp_processor_id();
+int r;
+
+/* If enabling a previous CPU failed already, let's not  
continue */

+if (_r && *((int*)_r))
+return;
   if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
   return;
+r = kvm_arch_hardware_enable(NULL);
+if (_r)
+*((int*)_r) = r;



Racy.  If one cpu succeeds and another fails, the successful one  
could overwrite the failing one's result.


While the race will never happen (start two VMMs simultaneously) it  
will cause an endless stream of complaints.  Let's use an atomic_t  
incremented on each failure.


Oh, and it can be global since we're inside a lock, so some of the  
changes to add a return value become unnecessary.


Right, that probably cleans up things a bit.





+static void hardware_disable_all(void)
+{
+if (!kvm_usage_count)
+return;



Can this happen?


It should not. Better make it a BUG(...)?





+
+spin_lock(kvm_usage_lock);
+kvm_usage_count--;
+if (!kvm_usage_count)
+on_each_cpu(hardware_disable, NULL, 1);
+spin_unlock(kvm_usage_lock);
+}
+



Please make sure cpu hotplug/hotunplug (and this suspend/resume)  
still work.


Make sure as in test?

Alex




--
error compiling committee.c: too
many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v3

2008-11-06 Thread Alexander Graf


On 05.11.2008, at 21:58, Eduardo Habkost wrote:


On Wed, Nov 05, 2008 at 11:41:04AM +0100, Alexander Graf wrote:
X86 CPUs need to have some magic happening to enable the  
virtualization

extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid  
TLB

entries (svm).

Currently KVM activates virtualization when the respective kernel  
module
is loaded. This blocks us from autoloading KVM modules without  
breaking

other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on removal of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

v2 adds returns to non-x86 hardware_enables and adds IA64 change
v3 changes:
 - use spin_lock instead of atomics
 - put locking to new functions hardware_{en,dis}able_all that get  
called

   on VM creation/destruction
 - remove usage counter checks where not necessary
 - return -EINVAL for IA64 slot  0 case

Signed-off-by: Alexander Graf [EMAIL PROTECTED]
---

snip


-static void hardware_enable(void *junk)
+static void hardware_enable(void *_r)
{
int cpu = raw_smp_processor_id();
+   int r;
+
+   /* If enabling a previous CPU failed already, let's not continue */
+   if (_r && *((int*)_r))
+   return;

if (cpu_isset(cpu, cpus_hardware_enabled))
return;
+   r = kvm_arch_hardware_enable(NULL);
+   if (_r)
+   *((int*)_r) = r;
+   if (r) {
+   printk(KERN_INFO "kvm: enabling virtualization on "
+      "CPU%d failed\n", cpu);
+   return;
+   }
+
cpu_set(cpu, cpus_hardware_enabled);
-   kvm_arch_hardware_enable(NULL);
+}


Doesn't on_each_cpu() run the function in parallel on all CPUs? If so,
there is a race between checking *_r and setting *_r.


Good question - it doesn't really hurt to write the value though, if  
we only write it on error.
So I guess we could just remove the first check and check on if (r &&  
_r) later on.


Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on removal of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

v2 adds returns to non-x86 hardware_enables and adds IA64 change

Signed-off-by: Alexander Graf [EMAIL PROTECTED]
---
 arch/ia64/kvm/kvm-ia64.c   |8 +++--
 arch/powerpc/kvm/powerpc.c |3 +-
 arch/s390/kvm/kvm-s390.c   |3 +-
 arch/x86/kvm/svm.c |   13 +--
 arch/x86/kvm/vmx.c |7 -
 arch/x86/kvm/x86.c |4 +-
 include/asm-x86/kvm_host.h |2 +-
 include/linux/kvm_host.h   |2 +-
 virt/kvm/kvm_main.c|   72 +++
 9 files changed, 80 insertions(+), 34 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c25c75b..2bd79db 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 
*opt_handler)
 
 static  DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
long  status;
long  tmp_base;
@@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot  0)
-   return;
+   return -ENOMEM;
 
spin_lock(vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -132,7 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, tmp_base);
if (status != 0) {
	printk(KERN_WARNING "kvm: Failed to Enable VT Support\n");
-   return ;
+   return -EINVAL;
}
 
if (!kvm_vsa_base) {
@@ -141,6 +141,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(vp_lock);
ia64_ptr_entry(0x3, slot);
+
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc4..dce664b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -79,8 +79,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu 
*vcpu)
return r;
 }
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8b00eb2..91e08e5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,9 +70,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 
 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
/* every s390 is virtualization enabled ;-) */
+   return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 05efc4e..3a8a820 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -275,7 +275,7 @@ static void svm_hardware_disable(void *garbage)
	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
 }
 
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
 {
 
struct svm_cpu_data *svm_data;
@@ -284,16 +284,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
 
+   rdmsrl(MSR_EFER, efer);
+   if (efer & MSR_EFER_SVME_MASK)
+   return -EBUSY;
+
if (!has_svm()) {
printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
-   return;
+   return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
 
if (!svm_data) {
printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
   me);
-   return;
+   return -EINVAL;
}
 
svm_data->asid_generation = 1;
@@ -304,11 +308,12 @@ static void svm_hardware_enable(void *garbage)
gdt = (struct desc_struct *)gdt_descr.address;
svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
-   rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
 
wrmsrl(MSR_VM_HSAVE_PA,
   page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+
+   return 0;

Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Avi Kivity

Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on removal of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

v2 adds returns to non-x86 hardware_enables and adds IA64 change

@@ -563,19 +566,27 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops 
= {
 
 static struct kvm *kvm_create_vm(void)

 {
+   int r;
struct kvm *kvm = kvm_arch_create_vm();
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
 #endif
 
 	if (IS_ERR(kvm))

-   goto out;
+   return kvm;
+
+   if (atomic_add_return(1, &kvm_usage_count) == 1) {
+   on_each_cpu(hardware_enable, &r, 1);
+
+   if (r)
+   goto out_err;
+   }
  


This can race -- if we're preempted immediately after 
atomic_add_return(), a second vm creation will see the count elevated 
and can start executing without virtualization enabled.



+
+out_err:
+   if (atomic_dec_and_test(&kvm_usage_count))
+   on_each_cpu(hardware_disable, NULL, 1);
  


Similar race.


@@ -660,6 +674,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #endif
kvm_arch_destroy_vm(kvm);
+   if (atomic_dec_and_test(&kvm_usage_count))
+   on_each_cpu(hardware_disable, NULL, 1);
mmdrop(mm);
 }
  


And again.  I suggest returning to spinlocks (and placing the duplicated 
disable code in a function).


 
-static void hardware_enable(void *junk)

+static void hardware_enable(void *_r)
 {
int cpu = raw_smp_processor_id();
+   int r;
 
 	if (cpu_isset(cpu, cpus_hardware_enabled))

return;
+   r = kvm_arch_hardware_enable(NULL);
+   if (_r)
+   *((int*)_r) = r;
+   if (r) {
+   printk(KERN_INFO "kvm: enabling virtualization on "
+  "CPU%d failed\n", cpu);
+   return;
+   }
+
cpu_set(cpu, cpus_hardware_enabled);
-   kvm_arch_hardware_enable(NULL);
 }
  


We'll be in a nice fix if we can only enable virtualization on some 
processors; that's the reason hardware_enable() was originally specified 
as returning void.


I don't see an easy way out, but it's hardly a likely event.


case CPU_UP_CANCELED:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
   cpu);
-   smp_call_function_single(cpu, hardware_disable, NULL, 1);
+   if (atomic_read(&kvm_usage_count))
+   smp_call_function_single(cpu, hardware_disable,
+   NULL, 1);
break;
case CPU_ONLINE:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
   cpu);
-   smp_call_function_single(cpu, hardware_enable, NULL, 1);
+   if (atomic_read(&kvm_usage_count))
+   smp_call_function_single(cpu, hardware_enable,
+NULL, 1);
break;
break;
  


Are these called in a point where processes can't run?  Otherwise 
there's a race here.



 static int kvm_resume(struct sys_device *dev)
 {
-   hardware_enable(NULL);
+   if (atomic_read(&kvm_usage_count))
+   hardware_enable(NULL);
return 0;
 }
  


Move the test to hardware_enable()?  It's repeated too often.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Daniel P. Berrange
On Wed, Nov 05, 2008 at 09:48:16AM +0100, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).
 
 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.
 
 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.

Pardon my unfamiliarity with the code, but with this change applied,
will we still get an explicit error at time of loading kvm-intel.ko
if VMX were disabled in the BIOS ? Or will that error reporting be 
pushed off to time when VMX is first activated ?

Daniel
-- 
|: Red Hat, Engineering, London   -o-   http://people.redhat.com/berrange/ :|
|: http://libvirt.org  -o-  http://virt-manager.org  -o-  http://ovirt.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: GnuPG: 7D3B9505  -o-  F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
Daniel P. Berrange wrote:
 On Wed, Nov 05, 2008 at 09:48:16AM +0100, Alexander Graf wrote:
   
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).

 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.
 

 Pardon my unfamiliarity with the code, but with this change applied,
 will we still get an explicit error at time of loading kvm-intel.ko
 if VMX were disabled in the BIOS ? Or will that error reporting be 
 pushed off to time when VMX is first activated ?
   

The BIOS and hardware support checks are in kvm_arch_init, which is
still called at load-time of the module.

Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
Avi Kivity wrote:
 Alexander Graf wrote:

  

 We'll be in a nice fix if we can only enable virtualization on some
 processors; that's the reason hardware_enable() was originally
 specified as returning void.

 I don't see an easy way out, but it's hardly a likely event.
 

 I don't think there's any way we can circumvent that.
   

 No.  We can live with it though.

 What I've wanted to ask for some time already: How does suspend/resume
 work? 

 The question is important, even without the first word.

 I only see one suspend/resume hook that disables virt on the
 currently running CPU. Why don't we have to loop through the CPUs to
 enable/disable all of them?
 At least for suspend-to-disk this sounds pretty necessary.

   

 Suspend first offlines all other cpus.

Ah, ok.

  {
 -hardware_enable(NULL);
 +if (atomic_read(kvm_usage_count))
 +hardware_enable(NULL);
  return 0;
  }
 
 Move the test to hardware_enable()?  It's repeated too often.
 

 What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
 tell when to activate/deactive virtualization then, as that's
 semantically bound to amount of VMs.
   
  static int kvm_resume(struct sys_device *dev)

 I don't understand.  Moving the test to within the IPI shouldn't
 affect anything.

Oh, you only want the test to be in hardware_enable and
hardware_disable. Now I see what you mean: modify and lock
kvm_usage_count outside, but test inside of hardware_enable.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Avi Kivity

Alexander Graf wrote:

 


We'll be in a nice fix if we can only enable virtualization on some
processors; that's the reason hardware_enable() was originally
specified as returning void.

I don't see an easy way out, but it's hardly a likely event.



I don't think there's any way we can circumvent that.
  


No.  We can live with it though.


What I've wanted to ask for some time already: How does suspend/resume
work? 


The question is important, even without the first word.


I only see one suspend/resume hook that disables virt on the
currently running CPU. Why don't we have to loop through the CPUs to
enable/disable all of them?
At least for suspend-to-disk this sounds pretty necessary.

  


Suspend first offlines all other cpus.


 static int kvm_resume(struct sys_device *dev)
 {
-hardware_enable(NULL);
+if (atomic_read(kvm_usage_count))
+hardware_enable(NULL);
 return 0;
 }
  
  

Move the test to hardware_enable()?  It's repeated too often.



What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
tell when to activate/deactive virtualization then, as that's
semantically bound to amount of VMs.
  


I don't understand.  Moving the test to within the IPI shouldn't affect 
anything.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
Avi Kivity wrote:
 Alexander Graf wrote:

[snip]

  static int kvm_resume(struct sys_device *dev)
  {
 -hardware_enable(NULL);
 +if (atomic_read(kvm_usage_count))
 +hardware_enable(NULL);
  return 0;
  }
 
 Move the test to hardware_enable()?  It's repeated too often.
 

 What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
 tell when to activate/deactive virtualization then, as that's
 semantically bound to amount of VMs.
   

 I don't understand.  Moving the test to within the IPI shouldn't
 affect anything.


Actually it's there already. Since we have the cpu_isset if here, we
won't get disabling when it's disabled or enabling when it's enabled on
a per-cpu basis. That code would've just saved us the IPIs :-).

So I'll add  hardware_{en,dis}able_all functions that do the locking and
increase / decrease the counter. Disable is used twice, while enable
only once. But for the sake of code readability, I think it might be a
good idea to have both of them be functions.

Also the locking isn't really needed in most cases (CPU hotplug,
suspend, resume, reboot, exit), since we don't schedule there.

Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
Avi Kivity wrote:
 Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).

 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.

 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.

 v2 adds returns to non-x86 hardware_enables and adds IA64 change

[snip]


 @@ -660,6 +674,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
  mmu_notifier_unregister(kvm-mmu_notifier, kvm-mm);
  #endif
  kvm_arch_destroy_vm(kvm);
 +if (atomic_dec_and_test(kvm_usage_count))
 +on_each_cpu(hardware_disable, NULL, 1);
  mmdrop(mm);
  }
   

 And again.  I suggest returning to spinlocks (and placing the
 duplicated disable code in a function).

OK.


  
 -static void hardware_enable(void *junk)
 +static void hardware_enable(void *_r)
  {
  int cpu = raw_smp_processor_id();
 +int r;
  
  if (cpu_isset(cpu, cpus_hardware_enabled))
  return;
 +r = kvm_arch_hardware_enable(NULL);
 +if (_r)
 +*((int*)_r) = r;
 +if (r) {
 +printk(KERN_INFO kvm: enabling virtualization on 
 + CPU%d failed\n, cpu);
 +return;
 +}
 +
  cpu_set(cpu, cpus_hardware_enabled);
 -kvm_arch_hardware_enable(NULL);
  }
   

 We'll be in a nice fix if we can only enable virtualization on some
 processors; that's the reason hardware_enable() was originally
 specified as returning void.

 I don't see an easy way out, but it's hardly a likely event.

I don't think there's any way we can circumvent that.
What I've wanted to ask for some time already: How does suspend/resume
work? I only see one suspend/resume hook that disables virt on the
currently running CPU. Why don't we have to loop through the CPUs to
enable/disable all of them?
At least for suspend-to-disk this sounds pretty necessary.

  printk(KERN_INFO kvm: disabling virtualization on CPU%d\n,
 cpu);
 -smp_call_function_single(cpu, hardware_disable, NULL, 1);
 +if (atomic_read(kvm_usage_count))
 +smp_call_function_single(cpu, hardware_disable,
 +NULL, 1);
  break;
  case CPU_ONLINE:
  printk(KERN_INFO kvm: enabling virtualization on CPU%d\n,
 cpu);
 -smp_call_function_single(cpu, hardware_enable, NULL, 1);
 +if (atomic_read(kvm_usage_count))
 +smp_call_function_single(cpu, hardware_enable,
 + NULL, 1);
  break;
   
  case CPU_UP_CANCELED:

 Are these called in a point where processes can't run?  Otherwise
 there's a race here.

Yes.

static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
.priority = 20, /* must be > scheduler priority */
};



  static int kvm_resume(struct sys_device *dev)
  {
 -hardware_enable(NULL);
 +if (atomic_read(kvm_usage_count))
 +hardware_enable(NULL);
  return 0;
  }
   

 Move the test to hardware_enable()?  It's repeated too often.

What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
tell when to activate/deactive virtualization then, as that's
semantically bound to amount of VMs.


Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v3

2008-11-05 Thread Alexander Graf
Avi Kivity wrote:
 Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).

 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.

 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.

 v2 adds returns to non-x86 hardware_enables and adds IA64 change
 v3 changes:
   - use spin_lock instead of atomics
   - put locking to new functions hardware_{en,dis}able_all that get
 called
 on VM creation/destruction
   - remove usage counter checks where not necessary
  - return -EINVAL for IA64 slot < 0 case

   

 Is this v3 with all the latest changes?  it precedes some messages
 where you say you'll change things by about 40 minutes.

Yeah, my clock was off. Somehow I get host time drifts sometimes when
using KVM and my NTP client wasn't running. But maybe it's just me doing
something wrong.

 In any case, I'll defer applying until Eduardo's kdump/reboot changes
 go in, since they touch the same places, and Eduardo's changes are
 much harder to test.

I agree.
Apart from that, do I get an ACK for it, so I can at least put it into
our package and rest assured nothing obvious is wrong :-)?

Alex
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Christian Borntraeger
Am Mittwoch, 5. November 2008 schrieb Alexander Graf:
   printk(KERN_INFO kvm: disabling virtualization on CPU%d\n,
  cpu);
[...]
   printk(KERN_INFO kvm: disabling virtualization on CPU%d\n,
  cpu);
[...]
   printk(KERN_INFO kvm: enabling virtualization on CPU%d\n,
  cpu);
[...]
   printk(KERN_INFO kvm: exiting hardware virtualization\n);

When you are at it, could you move these printk to the arches that actually 
enable/disable virtualization?

For example you could do something like 
if (callback) {
printk ...;
callback();
}

And then you could remove kvm_arch_hardware_enable/disable from s390 and 
powerpc.

Having these messages on s390 and powerpc makes absolutely no sense.
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Avi Kivity

Christian Borntraeger wrote:
When you are at it, could move these printk to the arches that atually 
enable/disable virtualization?


For example you could do something like 
if (callback) {

printk ...;
callback();
}

And then you could remove kvm_arch_hardware_enable/disable from s390 and 
powerpc.


Havng these messages on s390 and powerpc makes absolutely no sense.
  


In a separate patch, this is big enough already.

We could do this using kconfig

config KVM_NEEDS_HARDWARE_DETECT
   bool
   default n

and override it for x86/ia64.  This would remove the need to implement 
dummy callbacks in s390/ppc.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v2

2008-11-05 Thread Alexander Graf
Zhang, Xiantao wrote:
 Alexander Graf wrote:
   
 X86 CPUs need to have some magic happening to enable the
 virtualization 
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid
 TLB 
 entries (svm).

 Currently KVM activates virtualization when the respective kernel
 module 
 is loaded. This blocks us from autoloading KVM modules without
 breaking 
 other VMMs.

 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.

 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.

 v2 adds returns to non-x86 hardware_enables and adds IA64 change

 Signed-off-by: Alexander Graf [EMAIL PROTECTED]
 ---
  arch/ia64/kvm/kvm-ia64.c   |8 +++--
  arch/powerpc/kvm/powerpc.c |3 +-
  arch/s390/kvm/kvm-s390.c   |3 +-
  arch/x86/kvm/svm.c |   13 +--
  arch/x86/kvm/vmx.c |7 -
  arch/x86/kvm/x86.c |4 +-
  include/asm-x86/kvm_host.h |2 +-
  include/linux/kvm_host.h   |2 +-
  virt/kvm/kvm_main.c|   72
  +++ 9 files changed, 80
 insertions(+), 34 deletions(-) 

 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
 index c25c75b..2bd79db 100644
 --- a/arch/ia64/kvm/kvm-ia64.c
 +++ b/arch/ia64/kvm/kvm-ia64.c
 @@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva,
 u64 *opt_handler) 

  static  DEFINE_SPINLOCK(vp_lock);

 -void kvm_arch_hardware_enable(void *garbage)
 +int kvm_arch_hardware_enable(void *garbage)
  {
  long  status;
  long  tmp_base;
 @@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
  slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
  local_irq_restore(saved_psr);
  if (slot < 0)
 -return;
 +return -ENOMEM;
 

 Return -EINVAL may be more accurate. Here slot < 0 means invalid or incorrect 
 parameters are passed to ia64_itr_entry. 
 Xiantao
   
Okies.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v3

2008-11-05 Thread Avi Kivity

Alexander Graf wrote:

In any case, I'll defer applying until Eduardo's kdump/reboot changes
go in, since they touch the same places, and Eduardo's changes are
much harder to test.



I agree.
Apart from that, do I get an ACK for it, so I can at least put it into
our package and rest assured nothing obvious is wrong :-)?
  


It seems fine.

(the part where you call on_each_cpu and have all functions write their 
reply into the same r is icky, but should work AFAICS)


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v3

2008-11-05 Thread Avi Kivity

Alexander Graf wrote:

X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).

Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.

To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on removal of the last one.

So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.

v2 adds returns to non-x86 hardware_enables and adds IA64 change
v3 changes:
  - use spin_lock instead of atomics
  - put locking to new functions hardware_{en,dis}able_all that get called
on VM creation/destruction
  - remove usage counter checks where not necessary
  - return -EINVAL for IA64 slot < 0 case

  


Is this v3 with all the latest changes?  it precedes some messages where 
you say you'll change things by about 40 minutes.


In any case, I'll defer applying until Eduardo's kdump/reboot changes go 
in, since they touch the same places, and Eduardo's changes are much 
harder to test.


--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Activate Virtualization On Demand v3

2008-11-05 Thread Eduardo Habkost
On Wed, Nov 05, 2008 at 11:41:04AM +0100, Alexander Graf wrote:
 X86 CPUs need to have some magic happening to enable the virtualization
 extensions on them. This magic can result in unpleasant results for
 users, like blocking other VMMs from working (vmx) or using invalid TLB
 entries (svm).
 
 Currently KVM activates virtualization when the respective kernel module
 is loaded. This blocks us from autoloading KVM modules without breaking
 other VMMs.
 
 To circumvent this problem at least a bit, this patch introduces on
 demand activation of virtualization. This means, that instead
 virtualization is enabled on creation of the first virtual machine
 and disabled on removal of the last one.
 
 So using this, KVM can be easily autoloaded, while keeping other
 hypervisors usable.
 
 v2 adds returns to non-x86 hardware_enables and adds IA64 change
 v3 changes:
   - use spin_lock instead of atomics
   - put locking to new functions hardware_{en,dis}able_all that get called
 on VM creation/destruction
   - remove usage counter checks where not necessary
   - return -EINVAL for IA64 slot < 0 case
 
 Signed-off-by: Alexander Graf [EMAIL PROTECTED]
 ---
snip
  
 -static void hardware_enable(void *junk)
 +static void hardware_enable(void *_r)
  {
   int cpu = raw_smp_processor_id();
 + int r;
 +
 + /* If enabling a previous CPU failed already, let's not continue */
 + if (_r && *((int*)_r))
 + return;
  
   if (cpu_isset(cpu, cpus_hardware_enabled))
   return;
 + r = kvm_arch_hardware_enable(NULL);
 + if (_r)
 + *((int*)_r) = r;
 + if (r) {
 + printk(KERN_INFO "kvm: enabling virtualization on "
 +  "CPU%d failed\n", cpu);
 + return;
 + }
 +
   cpu_set(cpu, cpus_hardware_enabled);
 - kvm_arch_hardware_enable(NULL);
 +}

Doesn't on_each_cpu() run the function in parallel on all CPUs? If so,
there is a race between checking *_r and setting *_r.

 +
 +static int hardware_enable_all(void)
 +{
 + int r = 0;
 +
 + spin_lock(&kvm_usage_lock);
 + kvm_usage_count++;
 + if (kvm_usage_count == 1)
 + on_each_cpu(hardware_enable, &r, 1);
 + spin_unlock(&kvm_usage_lock);
 +
 + return r;
  }
  
snip

-- 
Eduardo
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html