Expose max_recycle_threshold as a configurable sysfs attribute of the
intel_cmt pmu. max_recycle_threshold is the maximum occupancy, in bytes,
that an rmid may still report and nonetheless be recycled; it bounds the
error introduced by reusing dirty rmids that never reached zero
occupancy.
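
For example, from userspace (a minimal sketch; the sysfs path is an
assumption here, since it depends on the name the pmu registers under):

  /*
   * Hypothetical userspace sketch: read and update the attribute.
   * Assumes the pmu shows up as "intel_cmt" under
   * /sys/bus/event_source/devices; adjust for the actual device name.
   */
  #include <stdio.h>

  int main(void)
  {
          const char *path = "/sys/bus/event_source/devices/intel_cmt"
                             "/max_recycle_threshold";
          unsigned int bytes;
          FILE *f = fopen(path, "r");

          if (!f || fscanf(f, "%u", &bytes) != 1)
                  return 1;
          fclose(f);
          printf("max_recycle_threshold: %u bytes\n", bytes);

          /* Writes need privilege; kstrtouint() also takes 0x/0 prefixes. */
          f = fopen(path, "w");
          if (!f)
                  return 1;
          fprintf(f, "%u\n", bytes * 2);
          fclose(f);
          return 0;
  }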

Signed-off-by: David Carrillo-Cisneros <[email protected]>
---
 arch/x86/events/intel/cmt.c | 57 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
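
As a cross-check of the default chosen in cmt_start() below, a
standalone sketch (not driver code) reproducing the arithmetic with the
example values from the in-code comment:

  /*
   * Default __cmt_max_threshold for a 35MB LLC and 56 RMIDs (max rmid
   * 55, as rmids are 0-based). x86_cache_size is reported in KB, hence
   * the extra * 1024.
   */
  #include <stdio.h>

  int main(void)
  {
          unsigned int cache_size_kb = 35 * 1024; /* 35MB LLC */
          unsigned int min_max_rmid = 55;         /* 56 rmids: 0..55 */
          unsigned int thresh;

          thresh = cache_size_kb * 1024 / (2 * (min_max_rmid + 1));
          printf("%u bytes (~%u KB)\n", thresh, thresh / 1024);
          /* -> 327680 bytes (~320 KB), about 0.9% of the LLC */
          return 0;
  }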

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 649eb5f..05803a8 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -61,6 +61,7 @@ static struct pkg_data **cmt_pkgs_data;
  * @__cmt_pre_mon_slice: Min time a monr is monitored before being readable.
  * @__cmt_min_mon_slice: Min time a monr stays monitored after becoming
  *                       readable.
+ * @__cmt_max_threshold: Max bytes of error due to reusing dirty rmids.
  */
 #define CMT_DEFAULT_PRE_MON_SLICE 2000         /* ms */
 static u64 __cmt_pre_mon_slice;
@@ -68,6 +69,7 @@ static u64 __cmt_pre_mon_slice;
 #define CMT_DEFAULT_MIN_MON_SLICE 5000         /* ms */
 static u64 __cmt_min_mon_slice;
 
+static unsigned int __cmt_max_threshold;       /* bytes */
 
 /*
  * If @pkgd == NULL, return first online, pkg_data in cmt_pkgs_data.
@@ -1831,9 +1833,54 @@ static struct attribute_group intel_cmt_format_group = {
        .attrs = intel_cmt_formats_attr,
 };
 
+static ssize_t max_recycle_threshold_show(struct device *dev,
+                               struct device_attribute *attr, char *page)
+{
+       ssize_t rv;
+
+       mutex_lock(&cmt_mutex);
+       rv = snprintf(page, PAGE_SIZE - 1, "%u\n",
+                     READ_ONCE(__cmt_max_threshold));
+
+       mutex_unlock(&cmt_mutex);
+       return rv;
+}
+
+static ssize_t max_recycle_threshold_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       unsigned int bytes;
+       int err;
+
+       err = kstrtouint(buf, 0, &bytes);
+       if (err)
+               return err;
+
+       mutex_lock(&cmt_mutex);
+       monr_hrchy_acquire_mutexes();
+       WRITE_ONCE(__cmt_max_threshold, bytes);
+       monr_hrchy_release_mutexes();
+       mutex_unlock(&cmt_mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(max_recycle_threshold);
+
+static struct attribute *intel_cmt_attrs[] = {
+       &dev_attr_max_recycle_threshold.attr,
+       NULL,
+};
+
+static const struct attribute_group intel_cmt_group = {
+       .attrs = intel_cmt_attrs,
+};
+
 static const struct attribute_group *intel_cmt_attr_groups[] = {
        &intel_cmt_events_group,
        &intel_cmt_format_group,
+       &intel_cmt_group,
        NULL,
 };
 
@@ -2270,6 +2317,16 @@ static int __init cmt_start(void)
        if (err)
                goto rm_prep;
 
+       /*
+        * A reasonable default upper limit on the max threshold is half
+        * of the cache space each rmid would occupy if all rmids tagged
+        * an equal share of lines in the LLC.
+        *
+        * For a 35MB LLC and 56 RMIDs, this is ~0.9% of the LLC, or 320 KB.
+        */
+       __cmt_max_threshold = boot_cpu_data.x86_cache_size * 1024 /
+                       (2 * (__min_max_rmid + 1));
+
        snprintf(scale, sizeof(scale), "%u", cmt_l3_scale);
        str = kstrdup(scale, GFP_KERNEL);
        if (!str) {
-- 
2.8.0.rc3.226.g39d4020
