Re: [PATCH v4 2/6] kvm: Remove ept_identity_pagetable from struct kvm_arch.

2014-09-10 Thread Gleb Natapov
On Wed, Aug 27, 2014 at 06:17:37PM +0800, Tang Chen wrote:
 kvm_arch->ept_identity_pagetable holds the ept identity pagetable page, but
 it is never used to refer to the page at all.
 
 In vcpu initialization, it indicates two things:
 1. whether the ept identity pagetable page has been allocated
 2. whether a memory slot for the identity page has been initialized
 
 Actually, kvm_arch->ept_identity_pagetable_done is enough to tell whether the
 ept identity pagetable is initialized. So we can remove ept_identity_pagetable.
 
 NOTE: In the original code, the ept identity pagetable page is pinned in memory.
   As a result, it cannot be migrated/hot-removed. After this patch, since
   kvm_arch->ept_identity_pagetable is removed, the ept identity pagetable page
   is no longer pinned in memory, and it can be migrated/hot-removed.
Reviewed-by: Gleb Natapov <g...@kernel.org>

 
 Signed-off-by: Tang Chen <tangc...@cn.fujitsu.com>
 ---
  arch/x86/include/asm/kvm_host.h |  1 -
  arch/x86/kvm/vmx.c  | 50 -
  arch/x86/kvm/x86.c  |  2 --
  3 files changed, 25 insertions(+), 28 deletions(-)
 
 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
 index 7c492ed..35171c7 100644
 --- a/arch/x86/include/asm/kvm_host.h
 +++ b/arch/x86/include/asm/kvm_host.h
 @@ -580,7 +580,6 @@ struct kvm_arch {
  
   gpa_t wall_clock;
  
 - struct page *ept_identity_pagetable;
   bool ept_identity_pagetable_done;
   gpa_t ept_identity_map_addr;
  
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
 index 4b80ead..953d529 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -743,6 +743,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
  static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
  static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
  static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
 +static int alloc_identity_pagetable(struct kvm *kvm);
  
  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 @@ -3938,21 +3939,27 @@ out:
  
  static int init_rmode_identity_map(struct kvm *kvm)
  {
 - int i, idx, r, ret;
 + int i, idx, r, ret = 0;
   pfn_t identity_map_pfn;
   u32 tmp;
  
   if (!enable_ept)
   return 1;
 - if (unlikely(!kvm->arch.ept_identity_pagetable)) {
 - printk(KERN_ERR "EPT: identity-mapping pagetable "
 - "haven't been allocated!\n");
 - return 0;
 +
 + /* Protect kvm->arch.ept_identity_pagetable_done. */
 + mutex_lock(&kvm->slots_lock);
 +
 + if (likely(kvm->arch.ept_identity_pagetable_done)) {
 + ret = 1;
 + goto out2;
   }
 - if (likely(kvm->arch.ept_identity_pagetable_done))
 - return 1;
 - ret = 0;
 +
   identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
 +
 + r = alloc_identity_pagetable(kvm);
 + if (r)
 + goto out2;
 +
   idx = srcu_read_lock(&kvm->srcu);
   r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
   if (r < 0)
 @@ -3970,6 +3977,9 @@ static int init_rmode_identity_map(struct kvm *kvm)
   ret = 1;
  out:
   srcu_read_unlock(&kvm->srcu, idx);
 +
 +out2:
 + mutex_unlock(&kvm->slots_lock);
   return ret;
  }
  
 @@ -4019,31 +4029,23 @@ out:
  
  static int alloc_identity_pagetable(struct kvm *kvm)
  {
 - struct page *page;
 + /*
 +  * In init_rmode_identity_map(), kvm->arch.ept_identity_pagetable_done
 +  * is checked before calling this function and set to true after the
 +  * call. The access to kvm->arch.ept_identity_pagetable_done should
 +  * be protected by kvm->slots_lock.
 +  */
 +
   struct kvm_userspace_memory_region kvm_userspace_mem;
   int r = 0;
  
 - mutex_lock(&kvm->slots_lock);
 - if (kvm->arch.ept_identity_pagetable)
 - goto out;
   kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
   kvm_userspace_mem.flags = 0;
   kvm_userspace_mem.guest_phys_addr =
   kvm->arch.ept_identity_map_addr;
   kvm_userspace_mem.memory_size = PAGE_SIZE;
   r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
 - if (r)
 - goto out;
  
 - page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 - if (is_error_page(page)) {
 - r = -EFAULT;
 - goto out;
 - }
 -
 - kvm->arch.ept_identity_pagetable = page;
 -out:
 - mutex_unlock(&kvm->slots_lock);
   return r;
  }
  
 @@ -7643,8 +7645,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
   kvm->arch.ept_identity_map_addr =
   VMX_EPT_IDENTITY_PAGETABLE_ADDR;
   err = -ENOMEM;
 - if (alloc_identity_pagetable(kvm) != 0)
 - goto free_vmcs;
   if (!init_rmode_identity_map(kvm))
   goto free_vmcs;
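
For readers skimming the thread, the hunks quoted above boil down to the flow
sketched below. This is a condensed illustration, not the literal patched
source: the srcu read-side section, the error paths, and the loop that fills
the identity page table are elided, and only names that appear in the diff or
commit message are used.

/* Sketch only -- simplified from the quoted diff. */
static int init_rmode_identity_map(struct kvm *kvm)
{
	int r, ret = 0;

	if (!enable_ept)
		return 1;

	/* slots_lock serializes first-time setup across vcpu creation. */
	mutex_lock(&kvm->slots_lock);

	if (kvm->arch.ept_identity_pagetable_done) {
		ret = 1;	/* another vcpu already did the work */
		goto out;
	}

	/*
	 * Allocation of the private memslot is now driven from here; no
	 * struct page reference is taken, so the page stays migratable.
	 */
	r = alloc_identity_pagetable(kvm);
	if (r)
		goto out;

	/* ... clear the page and write the identity-mapped entries ... */

	kvm->arch.ept_identity_pagetable_done = true;
	ret = 1;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}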

[PATCH v4 2/6] kvm: Remove ept_identity_pagetable from struct kvm_arch.

2014-08-27 Thread Tang Chen
kvm_arch->ept_identity_pagetable holds the ept identity pagetable page, but
it is never used to refer to the page at all.

In vcpu initialization, it indicates two things:
1. whether the ept identity pagetable page has been allocated
2. whether a memory slot for the identity page has been initialized

Actually, kvm_arch->ept_identity_pagetable_done is enough to tell whether the
ept identity pagetable is initialized. So we can remove ept_identity_pagetable.

NOTE: In the original code, the ept identity pagetable page is pinned in memory.
  As a result, it cannot be migrated/hot-removed. After this patch, since
  kvm_arch->ept_identity_pagetable is removed, the ept identity pagetable page
  is no longer pinned in memory, and it can be migrated/hot-removed.

Signed-off-by: Tang Chen <tangc...@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/vmx.c  | 50 -
 arch/x86/kvm/x86.c  |  2 --
 3 files changed, 25 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7c492ed..35171c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -580,7 +580,6 @@ struct kvm_arch {
 
gpa_t wall_clock;
 
-   struct page *ept_identity_pagetable;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4b80ead..953d529 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -743,6 +743,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
+static int alloc_identity_pagetable(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -3938,21 +3939,27 @@ out:
 
 static int init_rmode_identity_map(struct kvm *kvm)
 {
-   int i, idx, r, ret;
+   int i, idx, r, ret = 0;
pfn_t identity_map_pfn;
u32 tmp;
 
if (!enable_ept)
return 1;
-   if (unlikely(!kvm->arch.ept_identity_pagetable)) {
-   printk(KERN_ERR "EPT: identity-mapping pagetable "
-   "haven't been allocated!\n");
-   return 0;
+
+   /* Protect kvm->arch.ept_identity_pagetable_done. */
+   mutex_lock(&kvm->slots_lock);
+
+   if (likely(kvm->arch.ept_identity_pagetable_done)) {
+   ret = 1;
+   goto out2;
}
-   if (likely(kvm->arch.ept_identity_pagetable_done))
-   return 1;
-   ret = 0;
+
identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
+
+   r = alloc_identity_pagetable(kvm);
+   if (r)
+   goto out2;
+
idx = srcu_read_lock(&kvm->srcu);
r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
if (r < 0)
@@ -3970,6 +3977,9 @@ static int init_rmode_identity_map(struct kvm *kvm)
ret = 1;
 out:
srcu_read_unlock(&kvm->srcu, idx);
+
+out2:
+   mutex_unlock(&kvm->slots_lock);
return ret;
 }
 
@@ -4019,31 +4029,23 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
-   struct page *page;
+   /*
+    * In init_rmode_identity_map(), kvm->arch.ept_identity_pagetable_done
+    * is checked before calling this function and set to true after the
+    * call. The access to kvm->arch.ept_identity_pagetable_done should
+    * be protected by kvm->slots_lock.
+    */
+
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
 
-   mutex_lock(&kvm->slots_lock);
-   if (kvm->arch.ept_identity_pagetable)
-   goto out;
kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
kvm_userspace_mem.flags = 0;
kvm_userspace_mem.guest_phys_addr =
kvm->arch.ept_identity_map_addr;
kvm_userspace_mem.memory_size = PAGE_SIZE;
r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
-   if (r)
-   goto out;
 
-   page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
-   if (is_error_page(page)) {
-   r = -EFAULT;
-   goto out;
-   }
-
-   kvm->arch.ept_identity_pagetable = page;
-out:
-   mutex_unlock(&kvm->slots_lock);
return r;
 }
 
@@ -7643,8 +7645,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
kvm->arch.ept_identity_map_addr =
VMX_EPT_IDENTITY_PAGETABLE_ADDR;
err = -ENOMEM;
-   if (alloc_identity_pagetable(kvm) != 0)
-   goto free_vmcs;
if (!init_rmode_identity_map(kvm))
goto free_vmcs;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f1e22d..e05bd58 100644
--- a/arch/x86/kvm/x86.c
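
A side note on the locking rule spelled out in the new comment in
alloc_identity_pagetable(): the function now relies on its single caller,
init_rmode_identity_map(), already holding kvm->slots_lock. The patch documents
this only in prose; purely as an illustration (not part of this patch), the
requirement could also be made self-checking with the kernel's lockdep
annotation, assuming a lockdep-enabled build:

static int alloc_identity_pagetable(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;

	/* Illustration only: let lockdep catch callers that forget the lock. */
	lockdep_assert_held(&kvm->slots_lock);

	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr = kvm->arch.ept_identity_map_addr;
	kvm_userspace_mem.memory_size = PAGE_SIZE;

	return __kvm_set_memory_region(kvm, &kvm_userspace_mem);
}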