Re: [RFC PATCH v6 69/92] kvm: x86: keep the page protected if tracked by the introspection tool

2019-09-10 Thread Adalbert Lazăr
On Tue, 10 Sep 2019 10:26:42 -0400, Konrad Rzeszutek Wilk wrote:
> On Fri, Aug 09, 2019 at 07:00:24PM +0300, Adalbert Lazăr wrote:
> > This patch might be obsolete thanks to single-stepping.
> 
> So, should it be skipped from this large patchset to ease review?

I'll add a couple of warning messages to check whether this patch is still
needed, so that, if it isn't, it can be dropped from the next submission
(which will be smaller :))

However, on AMD, single-stepping is not an option.
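
For reference, the check I have in mind is something along these lines
(untested sketch, not part of this patch; the helper name and the exact
placement are only an example, but kvmi_tracked_gfn() is the helper used
in the quoted diff below):

/*
 * Untested sketch: complain (once) if we reach a point where a gfn
 * tracked by the introspection tool would otherwise be unprotected.
 * If this never triggers with single-stepping enabled, the patch can
 * be dropped.
 */
static void kvmi_warn_unprotect_tracked(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        WARN_ONCE(kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)),
                  "kvmi: unprotecting tracked gfn %llx\n",
                  gpa_to_gfn(gpa));
}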

Thanks,
Adalbert

> 
> > 
> > Signed-off-by: Adalbert Lazăr 
> > ---
> >  arch/x86/kvm/x86.c | 9 +++++++--
> >  1 file changed, 7 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 2c06de73a784..06f44ce8ed07 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -6311,7 +6311,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
> >  		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
> >  		spin_unlock(&vcpu->kvm->mmu_lock);
> >  
> > -		if (indirect_shadow_pages)
> > +		if (indirect_shadow_pages
> > +		    && !kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> >  			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> >  
> >  		return true;
> > @@ -6322,7 +6323,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
> >  	 * and it failed try to unshadow page and re-enter the
> >  	 * guest to let CPU execute the instruction.
> >  	 */
> > -	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> > +	if (!kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> > +		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> >  
> >  	/*
> >  	 * If the access faults on its page table, it can not
> > @@ -6374,6 +6376,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
> >  	if (!vcpu->arch.mmu->direct_map)
> >  		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
> >  
> > +	if (kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> > +		return false;
> > +
> >  	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> >  
> >  	return true;

Re: [RFC PATCH v6 69/92] kvm: x86: keep the page protected if tracked by the introspection tool

2019-09-10 Thread Konrad Rzeszutek Wilk
On Fri, Aug 09, 2019 at 07:00:24PM +0300, Adalbert Lazăr wrote:
> This patch might be obsolete thanks to single-stepping.

So, should it be skipped from this large patchset to ease review?

> 
> Signed-off-by: Adalbert Lazăr 
> ---
>  arch/x86/kvm/x86.c | 9 +++++++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2c06de73a784..06f44ce8ed07 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6311,7 +6311,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
>  		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
>  		spin_unlock(&vcpu->kvm->mmu_lock);
>  
> -		if (indirect_shadow_pages)
> +		if (indirect_shadow_pages
> +		    && !kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
>  			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  		return true;
> @@ -6322,7 +6323,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
>  	 * and it failed try to unshadow page and re-enter the
>  	 * guest to let CPU execute the instruction.
>  	 */
> -	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> +	if (!kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> +		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  	/*
>  	 * If the access faults on its page table, it can not
> @@ -6374,6 +6376,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
>  	if (!vcpu->arch.mmu->direct_map)
>  		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
>  
> +	if (kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> +		return false;
> +
>  	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  	return true;

[RFC PATCH v6 69/92] kvm: x86: keep the page protected if tracked by the introspection tool

2019-08-09 Thread Adalbert Lazăr
This patch might be obsolete thanks to single-stepping.

Signed-off-by: Adalbert Lazăr 
---
 arch/x86/kvm/x86.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c06de73a784..06f44ce8ed07 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6311,7 +6311,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 
-		if (indirect_shadow_pages)
+		if (indirect_shadow_pages
+		    && !kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 		return true;
@@ -6322,7 +6323,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
-	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	if (!kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
+		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 	/*
 	 * If the access faults on its page table, it can not
@@ -6374,6 +6376,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (!vcpu->arch.mmu->direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
 
+	if (kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
+		return false;
+
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 	return true;
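
If this patch survives the single-stepping rework, the repeated
!kvmi_tracked_gfn() check in reexecute_instruction() could be folded into a
small wrapper, e.g. (sketch only, not part of this patch; the helper name is
made up):

/* Unprotect the page unless the introspection tool is tracking it. */
static void kvm_mmu_unprotect_untracked(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	gfn_t gfn = gpa_to_gfn(gpa);

	if (!kvmi_tracked_gfn(vcpu, gfn))
		kvm_mmu_unprotect_page(vcpu->kvm, gfn);
}

The retry_instruction() site would stay as it is in the diff, since it has to
bail out with "return false" instead of merely skipping the unprotect.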
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization