From: Peter Zijlstra <[email protected]>

commit 90c91dfb86d0ff545bd329d3ddd72c147e2ae198 upstream.

Kan and Andi reported that we fail to kill rotation when the flexible
events go empty, but the context does not. Make sure rotate_necessary
is cleared whenever the flexible events are scheduled out, whenever the
last event is removed from a still-active context, and once an event to
rotate has been selected; ctx_flexible_sched_in() will set it again
when rotation is actually needed.
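
For reference, perf_rotate_context() keys off rotate_necessary when
deciding whether the tick needs to rotate at all; roughly (a sketch of
the pre-existing code in kernel/events/core.c, not part of this patch):

	cpu_rotate = cpuctx->ctx.rotate_necessary;
	task_ctx = cpuctx->task_ctx;
	task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;

	if (!(cpu_rotate || task_rotate))
		return false;

A rotate_necessary that is left set after the flexible events have gone
away therefore keeps the tick rotating indefinitely.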

Fixes: fd7d55172d1e ("perf/cgroups: Don't rotate events for cgroups unnecessarily")
Reported-by: Andi Kleen <[email protected]>
Reported-by: Kan Liang <[email protected]>
Tested-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Cc: Robin Murphy <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 kernel/events/core.c |   20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2171,6 +2171,7 @@ __perf_remove_from_context(struct perf_e
 
        if (!ctx->nr_events && ctx->is_active) {
                ctx->is_active = 0;
+               ctx->rotate_necessary = 0;
                if (ctx->task) {
                        WARN_ON_ONCE(cpuctx->task_ctx != ctx);
                        cpuctx->task_ctx = NULL;
@@ -3047,12 +3048,6 @@ static void ctx_sched_out(struct perf_ev
        if (!ctx->nr_active || !(is_active & EVENT_ALL))
                return;
 
-       /*
-        * If we had been multiplexing, no rotations are necessary, now no events
-        * are active.
-        */
-       ctx->rotate_necessary = 0;
-
        perf_pmu_disable(ctx->pmu);
        if (is_active & EVENT_PINNED) {
                list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -3062,6 +3057,13 @@ static void ctx_sched_out(struct perf_ev
        if (is_active & EVENT_FLEXIBLE) {
                list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
                        group_sched_out(event, cpuctx, ctx);
+
+               /*
+                * Since we cleared EVENT_FLEXIBLE, also clear
+                * rotate_necessary, it will be reset by
+                * ctx_flexible_sched_in() when needed.
+                */
+               ctx->rotate_necessary = 0;
        }
        perf_pmu_enable(ctx->pmu);
 }
@@ -3800,6 +3802,12 @@ ctx_event_to_rotate(struct perf_event_co
                                      typeof(*event), group_node);
        }
 
+       /*
+        * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
+        * finds there are unschedulable events, it will set it again.
+        */
+       ctx->rotate_necessary = 0;
+
        return event;
 }
 
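For context, the setter side sits in the flexible sched-in path
(reached via ctx_flexible_sched_in()); approximately, in the kernels
this patch applies to (a sketch, exact surrounding code differs per
branch):

	ret = group_sched_in(event, sid->cpuctx, sid->ctx);
	if (ret) {
		sid->can_add_hw = 0;
		sid->ctx->rotate_necessary = 1; /* flexible group did not fit */
		return 0;
	}

so clearing the flag at the points above is safe: any flexible group
that still fails to schedule will simply set it again.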

