[PATCH v14 07/11] KVM: vmx/pmu: Reduce the overhead of LBR pass-through or cancellation

2021-01-31 Thread Like Xu
When the LBR record MSRs have already been passed through, there is no
need to call vmx_update_intercept_for_lbr_msrs() again, and vice versa
when they have already been intercepted.

Signed-off-by: Like Xu 
Reviewed-by: Andi Kleen 
---
 arch/x86/kvm/vmx/pmu_intel.c | 13 +
 arch/x86/kvm/vmx/vmx.h   |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 287fc14f0445..60f395e18446 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -550,6 +550,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
vcpu->arch.perf_capabilities = 0;
lbr_desc->records.nr = 0;
lbr_desc->event = NULL;
+   lbr_desc->msr_passthrough = false;
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
@@ -596,12 +597,24 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 
 static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+   struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+   if (!lbr_desc->msr_passthrough)
+   return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, true);
+   lbr_desc->msr_passthrough = false;
 }
 
 static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+   struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+   if (lbr_desc->msr_passthrough)
+   return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, false);
+   lbr_desc->msr_passthrough = true;
 }
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 863bb3fe73d4..4d6a2624a204 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -90,6 +90,9 @@ struct lbr_desc {
 * The records may be inaccurate if the host reclaims the LBR.
 */
struct perf_event *event;
+
+   /* True if LBRs are marked as not intercepted in the MSR bitmap */
+   bool msr_passthrough;
 };
 
 /*
-- 
2.29.2



[PATCH v14 07/11] KVM: vmx/pmu: Reduce the overhead of LBR pass-through or cancellation

2021-01-31 Thread Like Xu
When the LBR record MSRs have already been passed through, there is no
need to call vmx_update_intercept_for_lbr_msrs() again, and vice versa
when they have already been intercepted.

Signed-off-by: Like Xu 
Reviewed-by: Andi Kleen 
---
 arch/x86/kvm/vmx/pmu_intel.c | 13 +
 arch/x86/kvm/vmx/vmx.h   |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 287fc14f0445..60f395e18446 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -550,6 +550,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
vcpu->arch.perf_capabilities = 0;
lbr_desc->records.nr = 0;
lbr_desc->event = NULL;
+   lbr_desc->msr_passthrough = false;
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
@@ -596,12 +597,24 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 
 static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+   struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+   if (!lbr_desc->msr_passthrough)
+   return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, true);
+   lbr_desc->msr_passthrough = false;
 }
 
 static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+   struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+   if (lbr_desc->msr_passthrough)
+   return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, false);
+   lbr_desc->msr_passthrough = true;
 }
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 863bb3fe73d4..4d6a2624a204 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -90,6 +90,9 @@ struct lbr_desc {
 * The records may be inaccurate if the host reclaims the LBR.
 */
struct perf_event *event;
+
+   /* True if LBRs are marked as not intercepted in the MSR bitmap */
+   bool msr_passthrough;
 };
 
 /*
-- 
2.29.2