Bring all resource functions that are different between the vendors
into resource structure and initialize them dynamically.

Implement these functions separately for each vendor.
update_mba_bw : Feedback loop bandwidth update functionality is not
                needed for AMD.
cbm_validate  : Cache bitmask validate function. AMD allows
                non-contiguous masks. So, use separate functions for
                Intel and AMD.

Signed-off-by: Babu Moger <[email protected]>
---
 arch/x86/kernel/cpu/rdt.c             | 17 +++++++++++++----
 arch/x86/kernel/cpu/rdt.h             | 19 +++++++++++++------
 arch/x86/kernel/cpu/rdt_ctrlmondata.c |  4 ++--
 arch/x86/kernel/cpu/rdt_monitor.c     | 10 +++++++---
 4 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/cpu/rdt.c b/arch/x86/kernel/cpu/rdt.c
index 6dec45bf81d6..ae26b9b3fafa 100644
--- a/arch/x86/kernel/cpu/rdt.c
+++ b/arch/x86/kernel/cpu/rdt.c
@@ -867,10 +867,19 @@ static __init void rdt_init_res_defs_intel(void)
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
-               if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = IA32_MBA_THRTL_BASE;
-                       r->msr_update = mba_wrmsr;
-                       r->parse_ctrlval = parse_bw;
+               if ((r->rid == RDT_RESOURCE_L3) ||
+                   (r->rid == RDT_RESOURCE_L3DATA) ||
+                   (r->rid == RDT_RESOURCE_L3CODE) ||
+                   (r->rid == RDT_RESOURCE_L2) ||
+                   (r->rid == RDT_RESOURCE_L2DATA) ||
+                   (r->rid == RDT_RESOURCE_L2CODE))
+                       r->cbm_validate = cbm_validate;
+
+               else if (r->rid == RDT_RESOURCE_MBA) {
+                        r->msr_base = IA32_MBA_THRTL_BASE;
+                        r->msr_update = mba_wrmsr;
+                        r->parse_ctrlval = parse_bw;
+                        r->update_mba_bw = update_mba_bw;
                }
        }
 }
diff --git a/arch/x86/kernel/cpu/rdt.h b/arch/x86/kernel/cpu/rdt.h
index 2569c10c37f4..7205157d359b 100644
--- a/arch/x86/kernel/cpu/rdt.h
+++ b/arch/x86/kernel/cpu/rdt.h
@@ -386,9 +386,9 @@ static inline bool is_mbm_event(int e)
  * struct rdt_resource - attributes of an RDT resource
  * @rid:               The index of the resource
  * @alloc_enabled:     Is allocation enabled on this machine
- * @mon_enabled:               Is monitoring enabled for this feature
+ * @mon_enabled:       Is monitoring enabled for this feature
  * @alloc_capable:     Is allocation available on this machine
- * @mon_capable:               Is monitor feature available on this machine
+ * @mon_capable:       Is monitor feature available on this machine
  * @name:              Name to use in "schemata" file
  * @num_closid:                Number of CLOSIDs available
  * @cache_level:       Which cache level defines scope of this resource
@@ -400,10 +400,12 @@ static inline bool is_mbm_event(int e)
  * @cache:             Cache allocation related data
  * @format_str:                Per resource format string to show domain value
  * @parse_ctrlval:     Per resource function pointer to parse control values
- * @evt_list:                  List of monitoring events
- * @num_rmid:                  Number of RMIDs available
- * @mon_scale:                 cqm counter * mon_scale = occupancy in bytes
- * @fflags:                    flags to choose base and info files
+ * @update_mba_bw:     Feedback loop for MBA software controller function
+ * @cbm_validate:      Cache bitmask validate function
+ * @evt_list:          List of monitoring events
+ * @num_rmid:          Number of RMIDs available
+ * @mon_scale:         cqm counter * mon_scale = occupancy in bytes
+ * @fflags:            flags to choose base and info files
  */
 struct rdt_resource {
        int                     rid;
@@ -425,6 +427,9 @@ struct rdt_resource {
        const char              *format_str;
        int (*parse_ctrlval)    (void *data, struct rdt_resource *r,
                                 struct rdt_domain *d);
+       void (*update_mba_bw)   (struct rdtgroup *rgrp,
+                                struct rdt_domain *dom_mbm);
+       bool (*cbm_validate)    (char *buf, u32 *data, struct rdt_resource *r);
        struct list_head        evt_list;
        int                     num_rmid;
        unsigned int            mon_scale;
@@ -562,5 +567,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
+void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm);
+bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r);
 
 #endif /* _ASM_X86_RDT_H */
diff --git a/arch/x86/kernel/cpu/rdt_ctrlmondata.c b/arch/x86/kernel/cpu/rdt_ctrlmondata.c
index 0565c564b297..5a282b6c4bd7 100644
--- a/arch/x86/kernel/cpu/rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/rdt_ctrlmondata.c
@@ -88,7 +88,7 @@ int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
  *     are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
  * Additionally Haswell requires at least two bits set.
  */
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 {
        unsigned long first_bit, zero_bit, val;
        unsigned int cbm_len = r->cache.cbm_len;
@@ -153,7 +153,7 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
                return -EINVAL;
        }
 
-       if (!cbm_validate(data->buf, &cbm_val, r))
+       if ((r->cbm_validate) && !(r->cbm_validate(data->buf, &cbm_val, r)))
                return -EINVAL;
 
        if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
diff --git a/arch/x86/kernel/cpu/rdt_monitor.c b/arch/x86/kernel/cpu/rdt_monitor.c
index 577514cd4a71..0dc0260f10d9 100644
--- a/arch/x86/kernel/cpu/rdt_monitor.c
+++ b/arch/x86/kernel/cpu/rdt_monitor.c
@@ -361,7 +361,7 @@ void mon_event_count(void *info)
  * throttle MSRs already have low percentage values.  To avoid
  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
  */
-static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
        u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
@@ -520,6 +520,7 @@ void mbm_handle_overflow(struct work_struct *work)
        unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
        struct rdtgroup *prgrp, *crgrp;
        int cpu = smp_processor_id();
+       struct rdt_resource *r_mba;
        struct list_head *head;
        struct rdt_domain *d;
 
@@ -539,8 +540,11 @@ void mbm_handle_overflow(struct work_struct *work)
                list_for_each_entry(crgrp, head, mon.crdtgrp_list)
                        mbm_update(d, crgrp->mon.rmid);
 
-               if (is_mba_sc(NULL))
-                       update_mba_bw(prgrp, d);
+               if (is_mba_sc(NULL)) {
+                       r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+                       if (r_mba->update_mba_bw)
+                               r_mba->update_mba_bw(prgrp, d);
+               }
        }
 
        schedule_delayed_work_on(cpu, &d->mbm_over, delay);
-- 
2.17.1

Reply via email to