[PATCH v2 16/32] perf/x86/intel/cqm: add cgroup support

2016-05-11 Thread David Carrillo-Cisneros
Create a monr per monitored cgroup and insert it into the monr hierarchy.
Task events are leaves of their lowest monitored ancestor cgroup (the lowest
cgroup ancestor that has a monr).
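As a rough illustration of that "lowest monitored ancestor" lookup (a sketch
only, not code from this patch; cgrp_monr() and cgrp_is_monitored() are
made-up placeholders for the accessors the patch actually provides, such as
perf_cgroup_is_monitored() and monr_from_perf_cgroup()):

static struct monr *lowest_monitored_ancestor_monr(struct perf_cgroup *cgrp)
{
	struct cgroup_subsys_state *css;

	/*
	 * Walk from @cgrp towards the root and stop at the first cgroup
	 * that owns a monr; fall back to the root monr.
	 */
	for (css = &cgrp->css; css; css = css->parent) {
		struct perf_cgroup *c = container_of(css, struct perf_cgroup, css);

		if (cgrp_is_monitored(c))
			return cgrp_monr(c);
	}
	return monr_hrchy_root;
}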

CQM starts after the cgroup subsystem and uses the cqm_initialized_key
static key to avoid interfering with the perf cgroup logic until it is
properly initialized. The cqm_init_mutex protects that initialization.
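For context, a minimal self-contained sketch of that static-key-gated
initialization pattern (illustrative only: my_init_mutex, my_enabled_key,
my_cgroup_hook and my_start are made-up names, not code from this patch):

#include <linux/jump_label.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_init_mutex);
static DEFINE_STATIC_KEY_FALSE(my_enabled_key);

/* Hook called from the perf cgroup paths; returns immediately until enabled. */
void my_cgroup_hook(void)
{
	if (!static_branch_unlikely(&my_enabled_key))
		return;
	/* ... per-cgroup monitoring work ... */
}

/* Runs once, after the cgroup subsystem is up. */
int my_start(void)
{
	mutex_lock(&my_init_mutex);
	/* ... set up state for cgroups that already exist ... */
	static_branch_enable(&my_enabled_key);
	mutex_unlock(&my_init_mutex);
	return 0;
}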

Reviewed-by: Stephane Eranian 
Signed-off-by: David Carrillo-Cisneros 
---
 arch/x86/events/intel/cqm.c   | 595 +-
 arch/x86/events/intel/cqm.h   |  16 +
 arch/x86/include/asm/perf_event.h |  32 ++
 3 files changed, 639 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 0771154..fb62bac 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -89,6 +89,13 @@ struct monr *monr_hrchy_root;
 
 struct pkg_data **cqm_pkgs_data;
 
+/*
+ * Synchronizes initialization of cqm with cgroups.
+ */
+static DEFINE_MUTEX(cqm_init_mutex);
+
+DEFINE_STATIC_KEY_FALSE(cqm_initialized_key);
+
 static inline bool __pmonr__in_astate(struct pmonr *pmonr)
 {
lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
@@ -119,6 +126,9 @@ static inline bool __pmonr__in_instate(struct pmonr *pmonr)
return __pmonr__in_istate(pmonr) && !__pmonr__in_ilstate(pmonr);
 }
 
+/* Whether the monr is root. Recall that a cgroup can be non-root and yet
+ * point to the root monr.
+ */
 static inline bool monr__is_root(struct monr *monr)
 {
return monr_hrchy_root == monr;
@@ -165,6 +175,19 @@ static inline void __monr__clear_mon_active(struct monr *monr)
monr->flags &= ~MONR_MON_ACTIVE;
 }
 
+static inline bool monr_is_event_type(struct monr *monr)
+{
+   return !monr->mon_cgrp && monr->mon_event_group;
+}
+
+#ifdef CONFIG_CGROUP_PERF
+static inline struct cgroup_subsys_state *get_root_perf_css(void)
+{
+   /* Get css for root cgroup */
+   return  init_css_set.subsys[perf_event_cgrp_id];
+}
+#endif
+
 static inline bool __valid_pkg_id(u16 pkg_id)
 {
return pkg_id < topology_max_packages();
@@ -706,6 +729,7 @@ static struct monr *monr_alloc(void)
monr->parent = NULL;
INIT_LIST_HEAD(&monr->children);
INIT_LIST_HEAD(&monr->parent_entry);
+   monr->mon_cgrp = NULL;
monr->mon_event_group = NULL;
 
monr->pmonrs = kmalloc(
@@ -934,7 +958,7 @@ retry:
 }
 
 /*
- * Wrappers for monr manipulation in events.
+ * Wrappers for monr manipulation in events and cgroups.
  *
  */
 static inline struct monr *monr_from_event(struct perf_event *event)
@@ -947,6 +971,100 @@ static inline void event_set_monr(struct perf_event *event, struct monr *monr)
WRITE_ONCE(event->hw.cqm_monr, monr);
 }
 
+#ifdef CONFIG_CGROUP_PERF
+static inline struct monr *monr_from_perf_cgroup(struct perf_cgroup *cgrp)
+{
+   struct monr *monr;
+   struct cgrp_cqm_info *cqm_info;
+
+   cqm_info = (struct cgrp_cqm_info *)READ_ONCE(cgrp->arch_info);
+   WARN_ON_ONCE(!cqm_info);
+   monr = READ_ONCE(cqm_info->monr);
+   return monr;
+}
+
+static inline struct perf_cgroup *monr__get_mon_cgrp(struct monr *monr)
+{
+   WARN_ON_ONCE(!monr);
+   return READ_ONCE(monr->mon_cgrp);
+}
+
+static inline void
+monr__set_mon_cgrp(struct monr *monr, struct perf_cgroup *cgrp)
+{
+   WRITE_ONCE(monr->mon_cgrp, cgrp);
+}
+
+static inline void
+perf_cgroup_set_monr(struct perf_cgroup *cgrp, struct monr *monr)
+{
+   WRITE_ONCE(cgrp_to_cqm_info(cgrp)->monr, monr);
+}
+
+/*
+ * A perf_cgroup is monitored when it is set as a monr's mon_cgrp.
+ * There is a many-to-one relationship between perf_cgroups and the
+ * monrs they reference, but a monitored cgroup is necessarily
+ * referenced back by its monr's mon_cgrp.
+ */
+static inline bool perf_cgroup_is_monitored(struct perf_cgroup *cgrp)
+{
+   struct monr *monr;
+   struct perf_cgroup *monr_cgrp;
+
+   /* monr can be referenced by a cgroup other than the one in its
+* mon_cgrp, be careful.
+*/
+   monr = monr_from_perf_cgroup(cgrp);
+
+   monr_cgrp = monr__get_mon_cgrp(monr);
+   /* The root monr does not have a cgroup associated before
+* initialization. mon_cgrp and mon_event_group are a union, so the
+* pointer must be set for all non-root monrs.
+*/
+   return monr_cgrp && monr_cgrp == cgrp;
+}
+
+/* Set css's monr to the monr of its lowest monitored ancestor. */
+static inline void __css_set_monr_to_lma(struct cgroup_subsys_state *css)
+{
+   lockdep_assert_held(_mutex);
+   if (!css->parent) {
+   perf_cgroup_set_monr(css_to_perf_cgroup(css), monr_hrchy_root);
+   return;
+   }
+   perf_cgroup_set_monr(
+   css_to_perf_cgroup(css),
+   monr_from_perf_cgroup(css_to_perf_cgroup(css->parent)));
+}
+
+static inline void
+perf_cgroup_make_monitored(struct perf_cgroup *cgrp, struct monr *monr)
+{
+