Hi Peter,

Any comments on the patch set?

Thanks,
Kan
> 
> From: Kan Liang <kan.li...@intel.com>
> 
> There are two free running counters for the client IMC uncore. The
> custom event_init() function hardcodes their indices to
> 'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'. To support the
> 'UNCORE_PMC_IDX_FIXED + 1' case, the generic
> uncore_perf_event_update() is obscurely hacked.
> This code quality issue will cause problems when a new counter index,
> for example a free running counter index, is introduced into the
> generic code.
> 
> Introduce a customized event_read() function for the client IMC
> uncore. It is an exact copy of the previous generic
> uncore_pmu_event_read().
> The 'UNCORE_PMC_IDX_FIXED + 1' case is thereby isolated to the client
> IMC uncore only.
> 
> Reviewed-by: Thomas Gleixner <t...@linutronix.de>
> Signed-off-by: Kan Liang <kan.li...@intel.com>
> ---
> 
> Changes since V5:
>  - Add reviewed-by
> 
>  arch/x86/events/intel/uncore_snb.c | 33 +++++++++++++++++++++++++++++++--
>  1 file changed, 31 insertions(+), 2 deletions(-)
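> 
> For reviewers' context, the hardcoded index assignment in the current
> snb_uncore_imc_event_init() that the description refers to looks
> roughly like this (a from-memory sketch for illustration, not part of
> the patch):
> 
>       if (cfg == SNB_UNCORE_PMC_IMC_DATA_READS) {
>               base = SNB_UNCORE_PMC_IMC_DATA_READS_BASE;
>               idx = UNCORE_PMC_IDX_FIXED;
>       } else if (cfg == SNB_UNCORE_PMC_IMC_DATA_WRITES) {
>               base = SNB_UNCORE_PMC_IMC_DATA_WRITES_BASE;
>               idx = UNCORE_PMC_IDX_FIXED + 1;  /* the special-cased index */
>       }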
> 
> diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
> index aee5e84..df53521 100644
> --- a/arch/x86/events/intel/uncore_snb.c
> +++ b/arch/x86/events/intel/uncore_snb.c
> @@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
>               uncore_pmu_start_hrtimer(box);
>  }
> 
> +static void snb_uncore_imc_event_read(struct perf_event *event)
> +{
> +     struct intel_uncore_box *box = uncore_event_to_box(event);
> +     u64 prev_count, new_count, delta;
> +     int shift;
> +
> +     /*
> +      * There are two free running counters in IMC.
> +      * The index for the second one is hardcoded to
> +      * UNCORE_PMC_IDX_FIXED + 1.
> +      */
> +     if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
> +             shift = 64 - uncore_fixed_ctr_bits(box);
> +     else
> +             shift = 64 - uncore_perf_ctr_bits(box);
> +
> +     /* the hrtimer might modify the previous event value */
> +again:
> +     prev_count = local64_read(&event->hw.prev_count);
> +     new_count = uncore_read_counter(box, event);
> +     if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
> +             goto again;
> +
> +     delta = (new_count << shift) - (prev_count << shift);
> +     delta >>= shift;
> +
> +     local64_add(delta, &event->count);
> +}
> +
>  static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
>  {
>       struct intel_uncore_box *box = uncore_event_to_box(event);
> @@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
>                * Drain the remaining delta count out of a event
>                * that we are disabling:
>                */
> -             uncore_perf_event_update(box, event);
> +             snb_uncore_imc_event_read(event);
>               hwc->state |= PERF_HES_UPTODATE;
>       }
>  }
> @@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
>       .del            = snb_uncore_imc_event_del,
>       .start          = snb_uncore_imc_event_start,
>       .stop           = snb_uncore_imc_event_stop,
> -     .read           = uncore_pmu_event_read,
> +     .read           = snb_uncore_imc_event_read,
>  };
> 
>  static struct intel_uncore_ops snb_uncore_imc_ops = {
> --
> 2.7.4
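
P.S. The double shift in the new read function is the usual trick for
getting wrap-safe deltas out of a counter narrower than 64 bits. Below
is a minimal standalone illustration (plain C, with a hypothetical
48-bit width standing in for uncore_fixed_ctr_bits() and
uncore_perf_ctr_bits(); the kernel code additionally uses the
local64_xchg() retry loop to cope with concurrent hrtimer updates,
which this sketch omits):

#include <stdint.h>
#include <stdio.h>

#define CTR_BITS 48	/* hypothetical counter width */

/*
 * Shift both raw reads up so the counter's top bit becomes bit 63,
 * subtract, then shift back down: bits above CTR_BITS are discarded,
 * so a counter wrap still yields the correct small positive delta.
 */
static uint64_t ctr_delta(uint64_t prev, uint64_t new)
{
	int shift = 64 - CTR_BITS;
	uint64_t delta = (new << shift) - (prev << shift);

	return delta >> shift;
}

int main(void)
{
	uint64_t max = (1ULL << CTR_BITS) - 1;

	printf("%llu\n", (unsigned long long)ctr_delta(100, 250)); /* 150 */
	printf("%llu\n", (unsigned long long)ctr_delta(max, 9));   /* 10: wrapped */
	return 0;
}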
