Moving pmc_speculative_in_use() from pmu.c to pmu.h allows this inline
function to be reused by more callers in more files, such as pmu_intel.c.

Signed-off-by: Like Xu <like...@linux.intel.com>
---
 arch/x86/kvm/pmu.c | 11 -----------
 arch/x86/kvm/pmu.h | 11 +++++++++++
 2 files changed, 11 insertions(+), 11 deletions(-)
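
For illustration only, not part of this patch: once the helper lives in
pmu.h, a vendor-side caller could reuse it roughly as sketched below. The
function intel_pmu_any_pmc_in_use() is hypothetical; it merely shows how a
file such as vmx/pmu_intel.c could combine pmc_speculative_in_use() with the
existing intel_pmc_idx_to_pmc() and vcpu_to_pmu() helpers.

/*
 * Hypothetical example (not part of this patch): check whether the guest
 * still has any GP or fixed counter enabled on this vCPU.
 */
static bool intel_pmu_any_pmc_in_use(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int bit;

	/* Walk every valid counter index advertised for this vCPU. */
	for_each_set_bit(bit, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);
		if (pmc && pmc_speculative_in_use(pmc))
			return true;
	}

	return false;
}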

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8d2873cfec69..0081cb742743 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -476,17 +476,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
        kvm_pmu_refresh(vcpu);
 }
 
-static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
-{
-       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-
-       if (pmc_is_fixed(pmc))
-               return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
-                       pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
-
-       return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
-}
-
 /* Release perf_events for vPMCs that have been unused for a full time slice.  */
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index d9157128e6eb..6c902b2d2d5a 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -149,6 +149,17 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
        return sample_period;
 }
 
+static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+       if (pmc_is_fixed(pmc))
+               return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+                       pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
+
+       return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
-- 
2.29.2
