From: Kan Liang <[email protected]>

A ucode patch is also needed for Goldmont while the counter-freezing feature
is enabled. Otherwise, there will be some issues, e.g. lost PMIs.

Add a quirk to check the microcode version. If the system starts with the
wrong ucode, leave the counter-freezing feature permanently disabled.

The quirk function for Goldmont is similar to the one for Goldmont Plus.
Reuse the quirk function and rename it to atom_v4.

Signed-off-by: Kan Liang <[email protected]>
---
 arch/x86/events/intel/core.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ab01ef9..56401bc 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3839,23 +3839,48 @@ static __init void intel_nehalem_quirk(void)
        }
 }
 
-static bool intel_glp_counter_freezing_broken(int cpu)
+static bool intel_atom_v4_counter_freezing_broken(int cpu)
 {
        u32 rev = UINT_MAX; /* default to broken for unknown stepping */
 
-       switch (cpu_data(cpu).x86_stepping) {
-       case 1:
-               rev = 0x28;
+       switch (cpu_data(cpu).x86_model) {
+       case INTEL_FAM6_ATOM_GOLDMONT:
+               switch (cpu_data(cpu).x86_stepping) {
+               case 2:
+                       rev = 0xe;
+                       break;
+               case 9:
+                       rev = 0x2e;
+                       break;
+               case 10:
+                       rev = 0x8;
+                       break;
+               }
                break;
-       case 8:
-               rev = 0x6;
+
+       case INTEL_FAM6_ATOM_GOLDMONT_X:
+               switch (cpu_data(cpu).x86_stepping) {
+               case 1:
+                       rev = 0x1a;
+                       break;
+               }
                break;
+
+       case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+               switch (cpu_data(cpu).x86_stepping) {
+               case 1:
+                       rev = 0x28;
+                       break;
+               case 8:
+                       rev = 0x6;
+                       break;
+               }
        }
 
        return (cpu_data(cpu).microcode < rev);
 }
 
-static __init void intel_glp_counter_freezing_quirk(void)
+static __init void intel_atom_v4_counter_freezing_quirk(void)
 {
        /* Check if it's already disabled */
        if (disable_counter_freezing)
@@ -3865,7 +3890,7 @@ static __init void intel_glp_counter_freezing_quirk(void)
         * If the system starts with the wrong ucode, leave the
         * counter-freezing feature permanently disabled.
         */
-       if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
+       if (intel_atom_v4_counter_freezing_broken(raw_smp_processor_id())) {
                pr_info("PMU counter freezing disabled due to CPU errata,"
                        "please upgrade microcode\n");
                x86_pmu.counter_freezing = false;
@@ -4196,6 +4221,7 @@ __init int intel_pmu_init(void)
 
        case INTEL_FAM6_ATOM_GOLDMONT:
        case INTEL_FAM6_ATOM_GOLDMONT_X:
+               x86_add_quirk(intel_atom_v4_counter_freezing_quirk);
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4222,7 +4248,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
-               x86_add_quirk(intel_glp_counter_freezing_quirk);
+               x86_add_quirk(intel_atom_v4_counter_freezing_quirk);
                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
-- 
2.7.4

Reply via email to