On Mon, Jul 20, 2020 at 05:55:14PM +0200, Ahmed S. Darwish wrote:
> Preemption must be disabled before entering a sequence count write side
> critical section.  If it is not, a seqcount reader can preempt the
> write side section and then spin for the entire scheduler tick.  If
> that reader belongs to a real-time scheduling class, it can spin
> forever and the kernel will livelock.
> 
> Assert through lockdep that preemption is disabled for seqcount writers.
> 
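
For readers following along, the hazard described above comes from the
usual lockless read loop. A minimal sketch of both sides, assuming a
bare seqcount_t that is not wrapped in a seqlock_t (foo_seq, foo_data
and the functions are illustrative, not from the patch):

	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
	static unsigned int foo_data;

	/* Writer: preemption must be off across the odd/even transition. */
	static void foo_update(unsigned int val)
	{
		preempt_disable();
		write_seqcount_begin(&foo_seq);	/* count becomes odd */
		foo_data = val;
		write_seqcount_end(&foo_seq);	/* count becomes even again */
		preempt_enable();
	}

	/* Reader: retries until it sees a stable, even sequence count.
	 * If a preempted writer leaves the count odd, this loop never
	 * exits. */
	static unsigned int foo_read(void)
	{
		unsigned int seq, val;

		do {
			seq = read_seqcount_begin(&foo_seq);
			val = foo_data;
		} while (read_seqcount_retry(&foo_seq, seq));

		return val;
	}

The new lockdep assertion in write_seqcount_begin() catches writers
that skip the preempt_disable() step.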

This patch is causing compile failures for various images (e.g.
arm:allmodconfig, arm:imx_v6_v7_defconfig, mips:allmodconfig).

In file included from arch/arm/include/asm/bug.h:60,
                 from include/linux/bug.h:5,
                 from include/linux/thread_info.h:12,
                 from include/asm-generic/current.h:5,
                 from ./arch/arm/include/generated/asm/current.h:1,
                 from include/linux/sched.h:12,
                 from arch/arm/kernel/asm-offsets.c:11:
include/linux/seqlock.h: In function 'write_seqcount_begin_nested':
include/asm-generic/percpu.h:31:40: error: implicit declaration of function 'raw_smp_processor_id'
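
If it helps, this should be reproducible at the asm-offsets stage with
something like (toolchain prefix is illustrative):

	$ make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- allmodconfig
	$ make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- prepare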

Reverting it fixes the problem. Is this being addressed?

Guenter

> Signed-off-by: Ahmed S. Darwish <[email protected]>
> ---
>  include/linux/seqlock.h | 29 +++++++++++++++++++++++------
>  1 file changed, 23 insertions(+), 6 deletions(-)
> 
> diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
> index e885702d8b82..54bc20496392 100644
> --- a/include/linux/seqlock.h
> +++ b/include/linux/seqlock.h
> @@ -266,6 +266,12 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
>       kcsan_nestable_atomic_end();
>  }
>  
> +static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
> +{
> +     raw_write_seqcount_begin(s);
> +     seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
> +}
> +
>  /**
>   * write_seqcount_begin_nested() - start a seqcount_t write section with
>   *                                 custom lockdep nesting level
> @@ -276,8 +282,19 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
>   */
>  static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
>  {
> -     raw_write_seqcount_begin(s);
> -     seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
> +     lockdep_assert_preemption_disabled();
> +     __write_seqcount_begin_nested(s, subclass);
> +}
> +
> +/*
> + * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
> + *
> + * Use for internal seqlock.h code where it's known that preemption is
> + * already disabled. For example, seqlock_t write side functions.
> + */
> +static inline void __write_seqcount_begin(seqcount_t *s)
> +{
> +     __write_seqcount_begin_nested(s, 0);
>  }
>  
>  /**
> @@ -575,7 +592,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
>  static inline void write_seqlock(seqlock_t *sl)
>  {
>       spin_lock(&sl->lock);
> -     write_seqcount_begin(&sl->seqcount);
> +     __write_seqcount_begin(&sl->seqcount);
>  }
>  
>  /**
> @@ -601,7 +618,7 @@ static inline void write_sequnlock(seqlock_t *sl)
>  static inline void write_seqlock_bh(seqlock_t *sl)
>  {
>       spin_lock_bh(&sl->lock);
> -     write_seqcount_begin(&sl->seqcount);
> +     __write_seqcount_begin(&sl->seqcount);
>  }
>  
>  /**
> @@ -628,7 +645,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
>  static inline void write_seqlock_irq(seqlock_t *sl)
>  {
>       spin_lock_irq(&sl->lock);
> -     write_seqcount_begin(&sl->seqcount);
> +     __write_seqcount_begin(&sl->seqcount);
>  }
>  
>  /**
> @@ -649,7 +666,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
>       unsigned long flags;
>  
>       spin_lock_irqsave(&sl->lock, flags);
> -     write_seqcount_begin(&sl->seqcount);
> +     __write_seqcount_begin(&sl->seqcount);
>       return flags;
>  }
>  
> -- 
> 2.20.1
> 
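
For context on the split in the patch above: write_seqlock() and
friends take sl->lock first, and spin_lock() already disables
preemption on non-PREEMPT_RT kernels, so the internal
__write_seqcount_begin() can safely skip the lockdep assertion. A
minimal usage sketch (bar_lock and bar_data are illustrative):

	static DEFINE_SEQLOCK(bar_lock);
	static u64 bar_data;

	static void bar_update(u64 val)
	{
		/* spin_lock() inside write_seqlock() disables preemption,
		 * so no explicit preempt_disable() is needed here. */
		write_seqlock(&bar_lock);
		bar_data = val;
		write_sequnlock(&bar_lock);
	}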
