Intercept accesses to PMU MSRs and process them in the VPMU module. Dump VPMU state for all domains (HVM and PV) when requested.
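A note on the traps.c hunks below: they dispatch the PMU MSR ranges with a case label placed inside an if block, so the Intel-only ranges set vpmu_msr and then share the vendor check and the VPMU call with the AMD range, while an Intel range seen on a non-Intel CPU falls through to the default handler. The following is a minimal standalone sketch of that control flow, not the patch itself: the FAKE_* range constants are hypothetical stand-ins for the real msr-index.h MSRs, printf() stands in for vpmu_do_wrmsr(), and, like the patch, it depends on GCC's case-range extension.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in ranges; the patch uses the msr-index.h
 * MSR_P6_*, MSR_CORE_PERF_* and MSR_AMD_FAM15H_* definitions. */
#define FAKE_INTEL_PMU_FIRST 0x00c1
#define FAKE_INTEL_PMU_LAST  0x00c8
#define FAKE_AMD_PMU_FIRST   0xc0010200
#define FAKE_AMD_PMU_LAST    0xc001020b

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

/* Returns 1 if the MSR was forwarded to the (stubbed) VPMU handler. */
static int handle_pmu_msr(enum vendor vendor, uint32_t msr)
{
    int vpmu_msr = 0;   /* initialized before the switch, as in the patch */

    switch ( msr )
    {
    case FAKE_INTEL_PMU_FIRST ... FAKE_INTEL_PMU_LAST:
        if ( vendor == VENDOR_INTEL )
        {
            vpmu_msr = 1;
    case FAKE_AMD_PMU_FIRST ... FAKE_AMD_PMU_LAST:
            /* Reached either by falling in from the Intel branch above
             * or by a direct jump from the switch for the AMD range. */
            if ( vpmu_msr || (vendor == VENDOR_AMD) )
            {
                printf("MSR 0x%x -> VPMU\n", (unsigned)msr);
                return 1;
            }
            break;
        }
        /* Intel range on a non-Intel CPU: fall through to default. */
    default:
        return 0;   /* ordinary MSR handling */
    }
    return 0;
}

int main(void)
{
    handle_pmu_msr(VENDOR_INTEL, FAKE_INTEL_PMU_FIRST);  /* forwarded */
    handle_pmu_msr(VENDOR_AMD, FAKE_AMD_PMU_FIRST);      /* forwarded */
    handle_pmu_msr(VENDOR_AMD, FAKE_INTEL_PMU_FIRST);    /* default path */
    return 0;
}

The same shape appears twice in the patch, once in the WRMSR path and once in the RDMSR path.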
Signed-off-by: Boris Ostrovsky <boris.ostrov...@oracle.com>
Acked-by: Jan Beulich <jbeul...@suse.com>
Acked-by: Kevin Tian <kevin.t...@intel.com>
Reviewed-by: Dietmar Hahn <dietmar.h...@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.h...@ts.fujitsu.com>
---
 xen/arch/x86/domain.c             |  3 +--
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 49 ++++++++++++++++++++++++++++++++------
 xen/arch/x86/traps.c              | 50 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 92 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0de8a20..37c6371 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2075,8 +2075,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
 {
     paging_dump_vcpu_info(v);
 
-    if ( is_hvm_vcpu(v) )
-        vpmu_dump(v);
+    vpmu_dump(v);
 }
 
 void domain_cpuid(
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 2dd8000..839dce0 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -27,6 +27,7 @@
 #include <asm/regs.h>
 #include <asm/types.h>
 #include <asm/apic.h>
+#include <asm/traps.h>
 #include <asm/msr.h>
 #include <asm/msr-index.h>
 #include <asm/hvm/support.h>
@@ -299,12 +300,18 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
     for ( i = 0; i < arch_pmc_cnt; i++ )
         rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
+
+    if ( !has_hvm_container_vcpu(v) )
+        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
 static int core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    if ( !has_hvm_container_vcpu(v) )
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
     if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
         return 0;
 
@@ -342,6 +349,13 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
     wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
     wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
+
+    if ( !has_hvm_container_vcpu(v) )
+    {
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
+        core2_vpmu_cxt->global_ovf_ctrl = 0;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
+    }
 }
 
 static void core2_vpmu_load(struct vcpu *v)
@@ -442,7 +456,6 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
 static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                                uint64_t supported)
 {
-    u64 global_ctrl;
     int i, tmp;
     int type = -1, index = -1;
     struct vcpu *v = current;
@@ -486,7 +499,12 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     switch ( msr )
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        if ( msr_content & ~(0xC000000000000000 |
+                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
+                             ((1ULL << arch_pmc_cnt) - 1)) )
+            return 1;
         core2_vpmu_cxt->global_status &= ~msr_content;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
         return 0;
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
@@ -514,14 +532,18 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
         gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
         return 0;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        global_ctrl = msr_content;
+        core2_vpmu_cxt->global_ctrl = msr_content;
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         if ( msr_content &
              ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
             return 1;
-        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        if ( has_hvm_container_vcpu(v) )
+            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+                               &core2_vpmu_cxt->global_ctrl);
+        else
+            rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
 
         *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
         if ( msr_content != 0 )
         {
@@ -546,7 +568,11 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
             if ( msr_content & (~((1ull << 32) - 1)) )
                 return 1;
 
-            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+            if ( has_hvm_container_vcpu(v) )
+                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+                                   &core2_vpmu_cxt->global_ctrl);
+            else
+                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
 
             if ( msr_content & (1ULL << 22) )
                 *enabled_cntrs |= 1ULL << tmp;
@@ -560,9 +586,15 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     if ( type != MSR_TYPE_GLOBAL )
         wrmsrl(msr, msr_content);
     else
-        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    {
+        if ( has_hvm_container_vcpu(v) )
+            vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        else
+            wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    }
 
-    if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
+    if ( (core2_vpmu_cxt->global_ctrl & *enabled_cntrs) ||
+         (core2_vpmu_cxt->ds_area != 0) )
         vpmu_set(vpmu, VPMU_RUNNING);
     else
         vpmu_reset(vpmu, VPMU_RUNNING);
@@ -589,7 +621,10 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+            if ( has_hvm_container_vcpu(v) )
+                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+            else
+                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
             break;
         default:
             rdmsrl(msr, *msr_content);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 10fc2ca..70477b2 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -72,6 +72,7 @@
 #include <asm/apic.h>
 #include <asm/mc146818rtc.h>
 #include <asm/hpet.h>
+#include <asm/hvm/vpmu.h>
 #include <public/arch-x86/cpuid.h>
 #include <xsm/xsm.h>
 
@@ -896,8 +897,10 @@ void pv_cpuid(struct cpu_user_regs *regs)
             __clear_bit(X86_FEATURE_TOPOEXT % 32, &c);
         break;
 
+    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+        break;
+
     case 0x00000005: /* MONITOR/MWAIT */
-    case 0x0000000a: /* Architectural Performance Monitor Features */
     case 0x0000000b: /* Extended Topology Enumeration */
     case 0x8000000a: /* SVM revision and features */
     case 0x8000001b: /* Instruction Based Sampling */
@@ -913,6 +916,9 @@ void pv_cpuid(struct cpu_user_regs *regs)
     }
 
  out:
+    /* VPMU may decide to modify some of the leaves */
+    vpmu_do_cpuid(regs->eax, &a, &b, &c, &d);
+
     regs->eax = a;
     regs->ebx = b;
     regs->ecx = c;
@@ -1935,6 +1941,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
     char io_emul_stub[32];
     void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
     uint64_t val, msr_content;
+    bool_t vpmu_msr;
 
     if ( !read_descriptor(regs->cs, v, regs,
                           &code_base, &code_limit, &ar,
@@ -2425,6 +2432,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         uint32_t eax = regs->eax;
         uint32_t edx = regs->edx;
         msr_content = ((uint64_t)edx << 32) | eax;
+        vpmu_msr = 0;
         switch ( (u32)regs->ecx )
         {
         case MSR_FS_BASE:
@@ -2561,6 +2569,22 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
                 wrmsrl(regs->_ecx, msr_content);
             break;
+        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+            {
+                vpmu_msr = 1;
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+                {
+                    if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
+                        goto fail;
+                }
+                break;
+            }
+            /*FALLTHROUGH*/
         default:
             if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
                 break;
@@ -2593,6 +2617,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         break;
 
     case 0x32: /* RDMSR */
+        vpmu_msr = 0;
         switch ( (u32)regs->ecx )
         {
         case MSR_FS_BASE:
@@ -2663,6 +2688,29 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
                 [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1];
             regs->edx = 0;
             break;
+        case MSR_IA32_PERF_CAPABILITIES:
+            /* No extra capabilities are supported */
+            regs->eax = regs->edx = 0;
+            break;
+        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+            {
+                vpmu_msr = 1;
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+                {
+                    if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
+                        goto fail;
+
+                    regs->eax = (uint32_t)msr_content;
+                    regs->edx = (uint32_t)(msr_content >> 32);
+                }
+                break;
+            }
+            /*FALLTHROUGH*/
         default:
             if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
-- 
1.8.1.4