Re: [Xen-devel] [PATCH v15 13/21] x86/VPMU: Initialize PMU for PV(H) guests

2014-11-25 Thread Jan Beulich
>>> On 17.11.14 at 00:07, boris.ostrov...@oracle.com wrote:
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -362,24 +362,34 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
>      struct xen_pmu_intel_ctxt *core2_vpmu_cxt = NULL;
>      uint64_t *p = NULL;
>  
> -    if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
> -        return 0;
> -
> -    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> -    if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> +    p = xzalloc_bytes(sizeof(uint64_t));

xzalloc(uint64_t)?
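
(For reference -- a sketch of the two allocator macros as they appear in
xen/include/xen/xmalloc.h at the time of this series; both zero the
allocation, but they differ in type safety and alignment:

    /* Allocate space for a typed object, zeroed, aligned for the type. */
    #define xzalloc(_type) \
        ((_type *)_xzalloc(sizeof(_type), __alignof__(_type)))

    /* Allocate untyped, zeroed storage, aligned to SMP_CACHE_BYTES. */
    #define xzalloc_bytes(_bytes) _xzalloc(_bytes, SMP_CACHE_BYTES)

xzalloc(uint64_t) thus returns an already-cast uint64_t * with natural
alignment, making it the idiomatic choice for a single typed object.)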

> +    if ( !p )
>          goto out_err;
>  
> -    if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> -        goto out_err;
> -    vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> -
> -    core2_vpmu_cxt = xzalloc_bytes(sizeof(*core2_vpmu_cxt) +
> -                                   sizeof(uint64_t) * fixed_pmc_cnt +
> -                                   sizeof(struct xen_pmu_cntr_pair) *
> -                                   arch_pmc_cnt);
> -    p = xzalloc(uint64_t);

At least it was that way before...

With that fixed, feel free to add
Acked-by: Jan Beulich <jbeul...@suse.com>

Jan




[Xen-devel] [PATCH v15 13/21] x86/VPMU: Initialize PMU for PV(H) guests

2014-11-16 Thread Boris Ostrovsky
Code for initializing/tearing down PMU for PV(H) guests

Signed-off-by: Boris Ostrovsky <boris.ostrov...@oracle.com>
Acked-by: Kevin Tian <kevin.t...@intel.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.h...@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.h...@ts.fujitsu.com>
---
 tools/flask/policy/policy/modules/xen/xen.te |  4 ++
 xen/arch/x86/domain.c                        |  2 +
 xen/arch/x86/hvm/hvm.c                       |  1 +
 xen/arch/x86/hvm/svm/svm.c                   |  4 +-
 xen/arch/x86/hvm/svm/vpmu.c                  | 44 +
 xen/arch/x86/hvm/vmx/vmx.c                   |  4 +-
 xen/arch/x86/hvm/vmx/vpmu_core2.c            | 81 ---
 xen/arch/x86/hvm/vpmu.c                      | 98 +++-
 xen/common/event_channel.c                   |  1 +
 xen/include/asm-x86/hvm/vpmu.h               |  2 +
 xen/include/public/pmu.h                     |  2 +
 xen/include/public/xen.h                     |  1 +
 xen/include/xsm/dummy.h                      |  3 +
 xen/xsm/flask/hooks.c                        |  4 ++
 xen/xsm/flask/policy/access_vectors          |  2 +
 15 files changed, 212 insertions(+), 41 deletions(-)
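
(For context, a rough sketch of the guest side this enables: a PV(H)
guest registers a shared PMU page and then turns VPMU on via the new
xenpmu_op hypercall. The wrapper name and the page helper below are
Linux-style assumptions, not part of this patch; the op codes and
struct xen_pmu_params come from public/pmu.h in this series:

    /* Hypothetical guest-side VPMU init for one vCPU. */
    static int guest_vpmu_init(unsigned int cpu, struct xen_pmu_data *shared)
    {
        struct xen_pmu_params xp = {
            .version.maj = XENPMU_VER_MAJ,
            .version.min = XENPMU_VER_MIN,
            /* GFN of a zeroed, page-aligned buffer the guest owns;
             * virt_to_gfn() is an assumed helper. */
            .val = virt_to_gfn(shared),
            .vcpu = cpu,
        };

        /* HYPERVISOR_xenpmu_op() is an assumed Linux-style wrapper. */
        return HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
    }

XENPMU_finish with the same parameters tears the mapping down again.)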

diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
index ae7bf3c..9d84004 100644
--- a/tools/flask/policy/policy/modules/xen/xen.te
+++ b/tools/flask/policy/policy/modules/xen/xen.te
@@ -120,6 +120,10 @@ domain_comms(dom0_t, dom0_t)
 # Allow all domains to use (unprivileged parts of) the tmem hypercall
 allow domain_type xen_t:xen tmem_op;
 
+# Allow all domains to use PMU (but not to change its settings --- that's what
+# pmu_ctrl is for)
+allow domain_type xen_t:xen2 pmu_use;
+
 ###
 #
 # Domain creation
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index da5bdf4..ce1d187 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -445,6 +445,8 @@ int vcpu_initialise(struct vcpu *v)
 
     vmce_init_vcpu(v);
 
+    spin_lock_init(&v->arch.vpmu.vpmu_lock);
+
     if ( has_hvm_container_domain(d) )
     {
         rc = hvm_vcpu_initialise(v);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8f49b44..ec9c89a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4820,6 +4820,7 @@ static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
     HYPERCALL(hvm_op),
     HYPERCALL(sysctl),
     HYPERCALL(domctl),
+    HYPERCALL(xenpmu_op),
     [ __HYPERVISOR_arch_1 ] = (hvm_hypercall_t *)paging_domctl_continuation
 };
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8aca6e6..b1c4845 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1157,7 +1157,9 @@ static int svm_vcpu_initialise(struct vcpu *v)
         return rc;
     }
 
-    vpmu_initialise(v);
+    /* PVH's VPMU is initialized via hypercall */
+    if ( is_hvm_vcpu(v) )
+        vpmu_initialise(v);
 
     svm_guest_osvw_init(v);
 
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 8460d7b..fe852ed 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -363,17 +363,19 @@ static void amd_vpmu_destroy(struct vcpu *v)
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
         return;
 
-    if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
-        amd_vpmu_unset_msr_bitmap(v);
+    if ( has_hvm_container_vcpu(v) )
+    {
+        if ( is_msr_bitmap_on(vpmu) )
+            amd_vpmu_unset_msr_bitmap(v);
 
-    xfree(vpmu->context);
-    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
+        if ( is_hvm_vcpu(v) )
+            xfree(vpmu->context);
 
-    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
-    {
-        vpmu_reset(vpmu, VPMU_RUNNING);
         release_pmu_ownship(PMU_OWNER_HVM);
     }
+
+    vpmu->context = NULL;
+    vpmu_clear(vpmu);
 }
 
 /* VPMU part of the 'q' keyhandler */
@@ -439,15 +441,19 @@ int svm_vpmu_initialise(struct vcpu *v)
     if ( !counters )
         return -EINVAL;
 
-    ctxt = xzalloc_bytes(sizeof(*ctxt) +
-                         2 * sizeof(uint64_t) * num_counters);
-    if ( !ctxt )
+    if ( is_hvm_vcpu(v) )
     {
-        printk(XENLOG_G_WARNING "Insufficient memory for PMU, "
-               "PMU feature is unavailable on domain %d vcpu %d.\n",
-               v->vcpu_id, v->domain->domain_id);
-        return -ENOMEM;
+        ctxt = xzalloc_bytes(sizeof(*ctxt) +
+                             2 * sizeof(uint64_t) * num_counters);
+        if ( !ctxt )
+        {
+            printk(XENLOG_G_WARNING "%pv: Insufficient memory for PMU, "
+                   "PMU feature is unavailable\n", v);
+            return -ENOMEM;
+        }
     }
+    else
+        ctxt = &v->arch.vpmu.xenpmu_data->pmu.c.amd;
 
     ctxt->counters = sizeof(*ctxt);
     ctxt->ctrls = ctxt->counters + sizeof(uint64_t) * num_counters;
@@ -489,6 +495,16 @@ int