On Tue, Jun 08, 2021 at 05:26:58PM +0530, Kajol Jain wrote:
> +static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
> +{
> +     struct nvdimm_pmu *nd_pmu;
> +     u32 target;
> +     int nodeid;
> +     const struct cpumask *cpumask;
> +
> +     nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
> +
> +     /* Clear it, in case the given cpu is set in nd_pmu->arch_cpumask */
> +     cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
> +
> +     /*
> +      * If the given cpu is not the same as the current designated
> +      * cpu for counter access, just return.
> +      */
> +     if (cpu != nd_pmu->cpu)
> +             return 0;
> +
> +     /* Check for any active cpu in nd_pmu->arch_cpumask */
> +     target = cpumask_any(&nd_pmu->arch_cpumask);
> +     nd_pmu->cpu = target;
> +
> +     /*
> +      * In case we don't have any active cpu in nd_pmu->arch_cpumask,
> +      * check the given cpu's numa node list.
> +      */
> +     if (target >= nr_cpu_ids) {
> +             nodeid = cpu_to_node(cpu);
> +             cpumask = cpumask_of_node(nodeid);
> +             target = cpumask_any_but(cpumask, cpu);
> +             nd_pmu->cpu = target;
> +
> +             if (target >= nr_cpu_ids)
> +                     return -1;
> +     }
> +
> +     return 0;
> +}
> +
> +static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
> +{
> +     struct nvdimm_pmu *nd_pmu;
> +
> +     nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
> +
> +     if (nd_pmu->cpu >= nr_cpu_ids)
> +             nd_pmu->cpu = cpu;
> +
> +     return 0;
> +}

> +static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu)
> +{
> +     int nodeid, rc;
> +     const struct cpumask *cpumask;
> +
> +     /*
> +      * In case cpu hotplug is not handled by arch specific code,
> +      * it can still provide the required cpumask, which can be
> +      * used to get the designated cpu for counter access.
> +      * Check for any active cpu in nd_pmu->arch_cpumask.
> +      */
> +     if (!cpumask_empty(&nd_pmu->arch_cpumask)) {
> +             nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
> +     } else {
> +             /* Pick an active cpu from the cpumask of the device's numa node. */
> +             nodeid = dev_to_node(nd_pmu->dev);
> +             cpumask = cpumask_of_node(nodeid);
> +             nd_pmu->cpu = cpumask_any(cpumask);
> +     }
> +
> +     rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/nvdimm:online",
> +                                  nvdimm_pmu_cpu_online, nvdimm_pmu_cpu_offline);
> +

Did you actually test this hotplug stuff?

That is, create a counter, unplug the CPU the counter was on, and
continue counting? "perf stat -I" is a good option for this, run
concurrently with a hotplug.

Because I don't think it's actually correct. The thing is, perf core is
strictly per-cpu, and it will place the event on a specific CPU's
context. If you then unplug that CPU, nothing will touch the events on
that CPU anymore.

What drivers that span CPUs need to do is call
perf_pmu_migrate_context() whenever the CPU they were assigned to goes
away. Please have a look at arch/x86/events/rapl.c or
arch/x86/events/amd/power.c for relatively simple drivers that have this
property.
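
For reference, the offline path would need to look roughly like the
following. This is only a sketch modeled on rapl_cpu_offline() in
arch/x86/events/rapl.c; it assumes nvdimm_pmu embeds its struct pmu in a
member named "pmu", which isn't visible in the hunk above:

static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct nvdimm_pmu *nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
        u32 target;

        /* Nothing to do unless the outgoing CPU owns the events. */
        if (cpu != nd_pmu->cpu)
                return 0;

        /* Pick a replacement CPU, for example from the device's numa node. */
        target = cpumask_any_but(cpumask_of_node(cpu_to_node(cpu)), cpu);
        if (target >= nr_cpu_ids)
                return 0;

        nd_pmu->cpu = target;

        /*
         * Move the events perf core placed on the outgoing CPU's context
         * over to the new CPU so counting continues across the hotplug.
         * (&nd_pmu->pmu: assumed to be the embedded struct pmu.)
         */
        perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);

        return 0;
}

Without that perf_pmu_migrate_context() call the events stay on the
dead CPU's context and simply stop counting.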

