On Tue, Nov 05, 2013 at 06:01:25PM +0100, Stephane Eranian wrote:
> +static int rapl_cpu_dying(int cpu)
> +{
> +     struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
> +     struct perf_event *event, *tmp;
> +
> +     if (!pmu)
> +             return 0;
> +
> +     spin_lock(&rapl_hotplug_lock);
> +
> +     /*
> +      * Stop all system-wide RAPL events on this CPU; as a
> +      * consequence, this also stops the hrtimer.
> +      */
> +     list_for_each_entry_safe(event, tmp, &pmu->active_list, active_entry) {
> +             rapl_pmu_event_stop(event, PERF_EF_UPDATE);
> +     }
> +
> +     per_cpu(rapl_pmu, cpu) = NULL;
> +
> +     if (atomic_dec_and_test(&pmu->refcnt))
> +             kfree(pmu);
> +
> +     spin_unlock(&rapl_hotplug_lock);
> +     return 0;
> +}

Could you do an add-on patch similar to the one below -- no need to respin
the entire series for this.

---
commit 22cc4ccf63e10e361531bf61e6e6c96c53a2f665
Author: Yan, Zheng <[email protected]>
Date:   Tue Apr 16 19:51:05 2013 +0800

    perf/x86: Avoid kfree() in CPU_{STARTING,DYING}
    
    On -rt kfree() can schedule, but CPU_{STARTING,DYING} should be
    atomic. So use a list to defer kfree until CPU_{ONLINE,DEAD}.
    
    Signed-off-by: Yan, Zheng <[email protected]>
    Acked-by: Peter Zijlstra <[email protected]>
    Cc: [email protected]
    Cc: [email protected]
    Cc: [email protected]
    Link: http://lkml.kernel.org/r/[email protected]
    Signed-off-by: Ingo Molnar <[email protected]>

diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 75da9e18b128..50d4a1c58106 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2622,6 +2622,21 @@ static void __init uncore_pci_exit(void)
        }
 }
 
+/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
+static LIST_HEAD(boxes_to_free);
+
+static void __cpuinit uncore_kfree_boxes(void)
+{
+       struct intel_uncore_box *box;
+
+       while (!list_empty(&boxes_to_free)) {
+               box = list_entry(boxes_to_free.next,
+                                struct intel_uncore_box, list);
+               list_del(&box->list);
+               kfree(box);
+       }
+}
+
 static void __cpuinit uncore_cpu_dying(int cpu)
 {
        struct intel_uncore_type *type;
@@ -2636,7 +2651,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
                        box = *per_cpu_ptr(pmu->box, cpu);
                        *per_cpu_ptr(pmu->box, cpu) = NULL;
                        if (box && atomic_dec_and_test(&box->refcnt))
-                               kfree(box);
+                               list_add(&box->list, &boxes_to_free);
                }
        }
 }
@@ -2666,8 +2681,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
                                if (exist && exist->phys_id == phys_id) {
                                        atomic_inc(&exist->refcnt);
                                        *per_cpu_ptr(pmu->box, cpu) = exist;
-                                       kfree(box);
-                                       box = NULL;
+                                       if (box) {
+                                               list_add(&box->list,
+                                                        &boxes_to_free);
+                                               box = NULL;
+                                       }
                                        break;
                                }
                        }
@@ -2806,6 +2824,10 @@ static int
        case CPU_DYING:
                uncore_cpu_dying(cpu);
                break;
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               uncore_kfree_boxes();
+               break;
        default:
                break;
        }
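
For completeness, a sketch of what the equivalent deferral could look like
in the quoted RAPL code -- illustrative only: it assumes struct rapl_pmu
grows a "struct list_head list" member and that the RAPL hotplug notifier
gains CPU_ONLINE/CPU_DEAD cases; the names below are made up, not taken
from the series:

/* Deferred frees, drained from CPU_ONLINE/CPU_DEAD where sleeping is OK. */
static LIST_HEAD(rapl_pmus_to_free);

static void rapl_kfree_pmus(void)
{
	struct rapl_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &rapl_pmus_to_free, list) {
		list_del(&pmu->list);
		kfree(pmu);
	}
}

rapl_cpu_dying() would then queue the pmu instead of freeing it:

	if (atomic_dec_and_test(&pmu->refcnt))
		list_add(&pmu->list, &rapl_pmus_to_free);

and the notifier would call rapl_kfree_pmus() on CPU_ONLINE/CPU_DEAD, the
same way uncore_kfree_boxes() is called above.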