I hate all of this, but if this will finally catch the actual problem,
we can then revert all this, so sure.

Also, I think this will conflict with the patches from Nadav that I have
queued:

  https://lkml.kernel.org/r/20210220231712.2475218-1-na...@vmware.com

which I'll be pushing to tip/x86/mm once -rc1 happens.

On Fri, Feb 26, 2021 at 12:25:21PM +0100, Juergen Gross wrote:

> +static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
> +{
> +     unsigned int this_cpu = smp_processor_id();
> +     struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
> +     struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
> +     struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
> +
> +     cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
> +     if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
> +             cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
> +             cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
> +             send_call_function_single_ipi(cpu);
> +             cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
> +     } else {
> +             cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
> +     }
> +}
>  #else
> +#define cfd_seq_store(var, src, dst, type)
> +
>  static void csd_lock_record(call_single_data_t *csd)
>  {
>  }
> @@ -290,6 +396,19 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
>  
>  void __smp_call_single_queue(int cpu, struct llist_node *node)
>  {
> +#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
> +     if (static_branch_unlikely(&csdlock_debug_extended)) {
> +             unsigned int type;
> +
> +             type = CSD_TYPE(container_of(node, call_single_data_t,
> +                                          node.llist));
> +             if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
> +                     __smp_call_single_queue_debug(cpu, node);
> +                     return;
> +             }
> +     }
> +#endif

This is a bit weird; might as well put it in generic_exec_single(),
because there you still know the type matches.
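
Untested sketch of what that could look like, reusing the
csdlock_debug_extended static key and __smp_call_single_queue_debug()
from this patch; the type check in __smp_call_single_queue() then goes
away, and the ttwu/irq_work callers never take the detour:

	static int generic_exec_single(int cpu, struct __call_single_data *csd)
	{
		if (cpu == smp_processor_id()) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;
			unsigned long flags;

			/*
			 * We can unlock early even for the synchronous on-stack
			 * case, since we're doing this from the same CPU.
			 */
			csd_lock_record(csd);
			csd_unlock(csd);
			local_irq_save(flags);
			func(info);
			csd_lock_record(NULL);
			local_irq_restore(flags);
			return 0;
		}

		if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
			csd_unlock(csd);
			return -ENXIO;
		}

	#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		/* Here the CSD is known to be SYNC or ASYNC. */
		if (static_branch_unlikely(&csdlock_debug_extended)) {
			__smp_call_single_queue_debug(cpu, &csd->node.llist);
			return 0;
		}
	#endif
		__smp_call_single_queue(cpu, &csd->node.llist);

		return 0;
	}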


> @@ -712,12 +840,21 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
>               csd->node.src = smp_processor_id();
>               csd->node.dst = cpu;
>  #endif
> -             if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
> +             cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
> +             if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
>                       __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
> +                     cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
> +             } else {
> +                     cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
> +             }
>       }
>  
>       /* Send a message to all CPUs in the map */
> +     cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu,
> +                   CFD_SEQ_NOCPU, CFD_SEQ_PING);
>       arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
> +     cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu,
> +                   CFD_SEQ_NOCPU, CFD_SEQ_PINGED);

Too bad we can't share with the single case, ah well.
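
About the most that could be shared is the ping/pinged bookkeeping
around the IPI itself; untested sketch, helper names made up, reusing
cfd_seq_local/cfd_seq_store from this patch:

	static void cfd_seq_ipi_single(int cpu, unsigned int this_cpu)
	{
		struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);

		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	}

	static void cfd_seq_ipi_mask(const struct cpumask *mask, unsigned int this_cpu)
	{
		struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);

		cfd_seq_store(seq->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
		arch_send_call_function_ipi_mask(mask);
		cfd_seq_store(seq->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

Not sure that buys much over the open-coded stores, though.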
