On Fri, May 22, 2015 at 03:29:07PM +0200, Peter Zijlstra wrote:
> @@ -696,11 +699,16 @@ static bool __perf_sched_find_counter(st
>                               goto done;
>               }
>       }
> +
>       /* Grab the first unused counter starting with idx */
>       idx = sched->state.counter;
>       for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
> +             if (!__test_and_set_bit(idx, sched->state.used)) {
> +                     if (sched->state.nr_gp++ >= sched->max_gp)
> +                             return false;

Note the placement of the accounting inside the success path of the GP
allocation, instead of at the point where we merely attempt to place a
GP counter, as we had before.

> +
>                       goto done;
> +             }
>       }
>  
>       return false;




> @@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw
>        * across HT threads
>        */
>       is_excl = c->flags & PERF_X86_EVENT_EXCL;
> +     if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
> +             event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
> +             if (!cpuc->n_excl++)
> +                     WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
> +     }
>  
>       /*
>        * xl = state of current HT

And that is what keeps repeated get_event_constraints() calls from ever
increasing our n_excl count.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to