Code for initializing/tearing down PMU for PV guests
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
---
tools/flask/policy/policy/modules/xen/xen.te | 4 ++
xen/arch/x86/domain.c| 2 +
xen/arch/x86/hvm/hvm.c | 1 +
xen/arch/x86/hvm/svm/svm.c | 4 +-
xen/arch/x86/hvm/svm/vpmu.c | 44 +
xen/arch/x86/hvm/vmx/vmx.c | 4 +-
xen/arch/x86/hvm/vmx/vpmu_core2.c| 81 ---
xen/arch/x86/hvm/vpmu.c | 98 +++-
xen/common/event_channel.c | 1 +
xen/include/asm-x86/hvm/vpmu.h | 2 +
xen/include/public/pmu.h | 2 +
xen/include/public/xen.h | 1 +
xen/include/xsm/dummy.h | 3 +
xen/xsm/flask/hooks.c| 4 ++
xen/xsm/flask/policy/access_vectors | 2 +
15 files changed, 212 insertions(+), 41 deletions(-)
diff --git a/tools/flask/policy/policy/modules/xen/xen.te
b/tools/flask/policy/policy/modules/xen/xen.te
index ae7bf3c..9d84004 100644
--- a/tools/flask/policy/policy/modules/xen/xen.te
+++ b/tools/flask/policy/policy/modules/xen/xen.te
@@ -120,6 +120,10 @@ domain_comms(dom0_t, dom0_t)
# Allow all domains to use (unprivileged parts of) the tmem hypercall
allow domain_type xen_t:xen tmem_op;
+# Allow all domains to use PMU (but not to change its settings --- that's what
+# pmu_ctrl is for)
+allow domain_type xen_t:xen2 pmu_use;
+
###
#
# Domain creation
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index da5bdf4..ce1d187 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -445,6 +445,8 @@ int vcpu_initialise(struct vcpu *v)
vmce_init_vcpu(v);
+    spin_lock_init(&v->arch.vpmu.vpmu_lock);
+
if ( has_hvm_container_domain(d) )
{
rc = hvm_vcpu_initialise(v);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8f49b44..ec9c89a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4820,6 +4820,7 @@ static hvm_hypercall_t *const
pvh_hypercall64_table[NR_hypercalls] = {
HYPERCALL(hvm_op),
HYPERCALL(sysctl),
HYPERCALL(domctl),
+    HYPERCALL(xenpmu_op),
[ __HYPERVISOR_arch_1 ] = (hvm_hypercall_t *)paging_domctl_continuation
};
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8aca6e6..b1c4845 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1157,7 +1157,9 @@ static int svm_vcpu_initialise(struct vcpu *v)
return rc;
}
-    vpmu_initialise(v);
+    /* PVH's VPMU is initialized via hypercall */
+    if ( is_hvm_vcpu(v) )
+        vpmu_initialise(v);
svm_guest_osvw_init(v);
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 8460d7b..fe852ed 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -363,17 +363,19 @@ static void amd_vpmu_destroy(struct vcpu *v)
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
-    if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
-        amd_vpmu_unset_msr_bitmap(v);
+    if ( has_hvm_container_vcpu(v) )
+    {
+        if ( is_msr_bitmap_on(vpmu) )
+            amd_vpmu_unset_msr_bitmap(v);

-    xfree(vpmu->context);
-    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
+        if ( is_hvm_vcpu(v) )
+            xfree(vpmu->context);

-    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
-    {
-        vpmu_reset(vpmu, VPMU_RUNNING);
         release_pmu_ownship(PMU_OWNER_HVM);
     }
+
+    vpmu->context = NULL;
+    vpmu_clear(vpmu);
 }
/* VPMU part of the 'q' keyhandler */
@@ -439,15 +441,19 @@ int svm_vpmu_initialise(struct vcpu *v)
if ( !counters )
return -EINVAL;
-    ctxt = xzalloc_bytes(sizeof(*ctxt) +
-                         2 * sizeof(uint64_t) * num_counters);
-    if ( !ctxt )
+    if ( is_hvm_vcpu(v) )
     {
-        printk(XENLOG_G_WARNING "Insufficient memory for PMU, "
-               "PMU feature is unavailable on domain %d vcpu %d.\n",
-               v->vcpu_id, v->domain->domain_id);
-        return -ENOMEM;
+        ctxt = xzalloc_bytes(sizeof(*ctxt) +
+                             2 * sizeof(uint64_t) * num_counters);
+        if ( !ctxt )
+        {
+            printk(XENLOG_G_WARNING "%pv: Insufficient memory for PMU, "
+                   "PMU feature is unavailable\n", v);
+            return -ENOMEM;
+        }
     }
+    else
+        ctxt = &v->arch.vpmu.xenpmu_data->pmu.c.amd;

     ctxt->counters = sizeof(*ctxt);
     ctxt->ctrls = ctxt->counters + sizeof(uint64_t) * num_counters;
@@ -489,6 +495,16 @@ int