Re: [PATCH v5 7/7] kvm, mem-hotplug: Unpin and remove nested_vmx->apic_access_page.

2014-09-11 Thread tangchen


On 09/11/2014 05:33 PM, Paolo Bonzini wrote:

This patch is not against the latest KVM tree.  The call to
nested_get_page is now in nested_get_vmcs12_pages, and you have to
handle virtual_apic_page in a similar manner.

Hi Paolo,

Thanks for the review.

This patch set is against Linux v3.17-rc4.
I will rebase it against the latest KVM tree and resend the series
following your comments.


Thanks.


Re: [PATCH v5 7/7] kvm, mem-hotplug: Unpin and remove nested_vmx->apic_access_page.

2014-09-11 Thread Paolo Bonzini
On 11/09/2014 07:38, Tang Chen wrote:
> Just like we removed kvm_arch->apic_access_page, nested_vmx->apic_access_page
> becomes useless for the same reason. This patch removes
> nested_vmx->apic_access_page, uses gfn_to_page() to pin the page in memory
> only when we need it, and unpins it afterwards.
> 
> Signed-off-by: Tang Chen 
> ---
>  arch/x86/kvm/vmx.c | 31 +--
>  1 file changed, 9 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 058c373..4aa73cb 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -374,11 +374,6 @@ struct nested_vmx {
>   u64 vmcs01_tsc_offset;
>   /* L2 must run next, and mustn't decide to exit to L1. */
>   bool nested_run_pending;
> - /*
> -  * Guest pages referred to in vmcs02 with host-physical pointers, so
> -  * we must keep them pinned while L2 runs.
> -  */
> - struct page *apic_access_page;
>   u64 msr_ia32_feature_control;
>  
>   struct hrtimer preemption_timer;
> @@ -6154,11 +6149,6 @@ static void free_nested(struct vcpu_vmx *vmx)
>   nested_release_vmcs12(vmx);
>   if (enable_shadow_vmcs)
>   free_vmcs(vmx->nested.current_shadow_vmcs);
> - /* Unpin physical memory we referred to in current vmcs02 */
> - if (vmx->nested.apic_access_page) {
> - nested_release_page(vmx->nested.apic_access_page);
> - vmx->nested.apic_access_page = 0;
> - }
>  
>   nested_free_all_saved_vmcss(vmx);
>  }
> @@ -7983,28 +7973,31 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>   exec_control |= vmcs12->secondary_vm_exec_control;
>  
>   if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
> + struct page *page;
>   /*
>* Translate L1 physical address to host physical
>* address for vmcs02. Keep the page pinned, so this
>* physical address remains valid. We keep a reference
>* to it so we can release it later.
>*/
> - if (vmx->nested.apic_access_page) /* shouldn't happen */
> - nested_release_page(vmx->nested.apic_access_page);
> - vmx->nested.apic_access_page =
> - nested_get_page(vcpu, vmcs12->apic_access_addr);
> + page = nested_get_page(vcpu, vmcs12->apic_access_addr);
>   /*
>* If translation failed, no matter: This feature asks
>* to exit when accessing the given address, and if it
>* can never be accessed, this feature won't do
>* anything anyway.
>*/
> - if (!vmx->nested.apic_access_page)
> + if (!page)
>   exec_control &=
> ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
>   else
>   vmcs_write64(APIC_ACCESS_ADDR,
> -   page_to_phys(vmx->nested.apic_access_page));
> +  page_to_phys(page));
> + /*
> +  * Do not pin nested vm's apic access page in memory so
> +  * that memory hotplug process is able to migrate it.
> +  */
> + put_page(page);
>   } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
>   struct page *page = gfn_to_page(vmx->vcpu.kvm,
>   APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
> @@ -8807,12 +8800,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
>   /* This is needed for same reason as it was needed in prepare_vmcs02 */
>   vmx->host_rsp = 0;
>  
> - /* Unpin physical memory we referred to in vmcs02 */
> - if (vmx->nested.apic_access_page) {
> - nested_release_page(vmx->nested.apic_access_page);
> - vmx->nested.apic_access_page = 0;
> - }
> -
>   /*
>* Do not call kvm_reload_apic_access_page() because we are now
>* running, mmu_notifier will force to reload the page's hpa for L2
> 

This patch is not against the latest KVM tree.  The call to
nested_get_page is now in nested_get_vmcs12_pages, and you have to
handle virtual_apic_page in a similar manner.
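
The virtual-APIC page wants the same brief pin, write, unpin treatment
there. A rough sketch (function shape and surrounding checks assumed from
the current queue, so read it as illustrative rather than the real code):

	/*
	 * Illustrative only: the pattern from the apic_access_addr hunk
	 * above, applied to vmcs12->virtual_apic_page_addr inside
	 * nested_get_vmcs12_pages().
	 */
	static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12)
	{
		if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
			struct page *page =
				nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);

			if (page) {
				vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
					     page_to_phys(page));
				/* Unpin so memory hotplug can migrate the page. */
				put_page(page);
			}
		}
		/* ... apic_access_addr would be handled the same way ... */
	}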

Paolo


[PATCH v5 7/7] kvm, mem-hotplug: Unpin and remove nested_vmx->apic_access_page.

2014-09-10 Thread Tang Chen
Just like we removed kvm_arch->apic_access_page, nested_vmx->apic_access_page
becomes useless for the same reason. This patch removes
nested_vmx->apic_access_page, uses gfn_to_page() to pin the page in memory
only when we need it, and unpins it afterwards.

Signed-off-by: Tang Chen 
---
 arch/x86/kvm/vmx.c | 31 +--
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 058c373..4aa73cb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -374,11 +374,6 @@ struct nested_vmx {
u64 vmcs01_tsc_offset;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
-   /*
-* Guest pages referred to in vmcs02 with host-physical pointers, so
-* we must keep them pinned while L2 runs.
-*/
-   struct page *apic_access_page;
u64 msr_ia32_feature_control;
 
struct hrtimer preemption_timer;
@@ -6154,11 +6149,6 @@ static void free_nested(struct vcpu_vmx *vmx)
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
-   /* Unpin physical memory we referred to in current vmcs02 */
-   if (vmx->nested.apic_access_page) {
-   nested_release_page(vmx->nested.apic_access_page);
-   vmx->nested.apic_access_page = 0;
-   }
 
nested_free_all_saved_vmcss(vmx);
 }
@@ -7983,28 +7973,31 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control |= vmcs12->secondary_vm_exec_control;
 
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
+   struct page *page;
/*
 * Translate L1 physical address to host physical
 * address for vmcs02. Keep the page pinned, so this
 * physical address remains valid. We keep a reference
 * to it so we can release it later.
 */
-   if (vmx->nested.apic_access_page) /* shouldn't happen */
-   nested_release_page(vmx->nested.apic_access_page);
-   vmx->nested.apic_access_page =
-   nested_get_page(vcpu, vmcs12->apic_access_addr);
+   page = nested_get_page(vcpu, vmcs12->apic_access_addr);
/*
 * If translation failed, no matter: This feature asks
 * to exit when accessing the given address, and if it
 * can never be accessed, this feature won't do
 * anything anyway.
 */
-   if (!vmx->nested.apic_access_page)
+   if (!page)
exec_control &=
  ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
else
vmcs_write64(APIC_ACCESS_ADDR,
- page_to_phys(vmx->nested.apic_access_page));
+page_to_phys(page));
+   /*
+* Do not pin nested vm's apic access page in memory so
+* that memory hotplug process is able to migrate it.
+*/
+   put_page(page);
} else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
struct page *page = gfn_to_page(vmx->vcpu.kvm,
APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
@@ -8807,12 +8800,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
 
-   /* Unpin physical memory we referred to in vmcs02 */
-   if (vmx->nested.apic_access_page) {
-   nested_release_page(vmx->nested.apic_access_page);
-   vmx->nested.apic_access_page = 0;
-   }
-
/*
 * Do not call kvm_reload_apic_access_page() because we are now
 * running, mmu_notifier will force to reload the page's hpa for L2
-- 
1.8.3.1
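
For reference, the reload path that the comment in the last hunk relies on
(added earlier in this series) boils down to asking every vcpu to
re-translate the page before the next guest entry. A minimal sketch, with
the body and helper name assumed from the series description rather than
quoted from it:

	/*
	 * Assumed sketch, not part of this patch: when hotplug migrates
	 * the backing page, the mmu_notifier ends up here, and every vcpu
	 * is asked to rewrite APIC_ACCESS_ADDR with the new host physical
	 * address before re-entering the guest.
	 */
	void kvm_reload_apic_access_page(struct kvm *kvm)
	{
		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
	}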
