Jason Wessel wrote:
> Will Deacon wrote:
> 
>>> Clearly, kgdb is using atomic_set()/atomic_read() in a way which does not
>>> match this documentation - it's certainly missing the barriers as required
>>> by the above quoted paragraphs.
>>>
>>> Let me repeat: atomic_set() and atomic_read() are NOT atomic.  There's
>>> nothing atomic about them.  All they do is provide a pair of accessors
>>> to the underlying value in the atomic type.  They are no different to
>>> declaring a volatile int and reading/writing it directly.
> 
> 
> Clearly the docs state this.
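
For reference, the generic atomic_read()/atomic_set() really are just plain
accessors; on most architectures they amount to something like the rough
sketch below (illustrative only, the exact definitions vary per arch):

    typedef struct { int counter; } atomic_t;

    /* No locking and no memory barriers: a plain load and a plain store
     * of ->counter, nothing more. */
    #define atomic_read(v)   ((v)->counter)
    #define atomic_set(v, i) (((v)->counter) = (i))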
> 
>> Indeed. I'm not familiar enough with KGDB internals to dive in and look at
>> all the potential barrier conflicts, so I'll submit a patch that addresses
>> the one that's bitten me so far. Maybe it will motivate somebody else to
>> take a closer look!
>>
> 
> 
> Do you think you can try the patch below?
> 
> It seems we might not even need to change to atomic_add_return(0, ...),
> because using atomic_inc() and atomic_dec() will end up providing the
> memory barriers anyway.
>
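
For context: the value-returning atomic RMW operations (atomic_add_return(),
atomic_inc_return(), ...) are documented to imply a full memory barrier on
each side of the operation, whereas atomic_read()/atomic_set() imply no
ordering at all. That is the idea behind the atomic_add_return(0, ...) usage
in the patch below; a minimal sketch (the helper name is hypothetical, not
something in the tree):

    /* Hypothetical helper, for illustration only: a read that also acts as
     * a full memory barrier, because it is a value-returning atomic RMW op. */
    static inline int kgdb_atomic_read_ordered(atomic_t *v)
    {
            return atomic_add_return(0, v);
    }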

Hi Jason,

Maybe we should initialize the atomic_t variables before using 
atomic_inc()/atomic_dec() on them directly.

Dongdong.


--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -227,6 +227,17 @@ kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
         return;
  }

+static void kgdb_initial_atomic_var(void)
+{
+       int i;
+       for (i = NR_CPUS-1; i >= 0; i--) {
+               atomic_set(&passive_cpu_wait[i], 0);
+               atomic_set(&cpu_in_kgdb[i], 0);
+       }
+
+       atomic_set(&kgdb_setting_breakpoint, 0);
+}
+
  /**
   *     kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
   *     @regs: Current &struct pt_regs.
@@ -1619,6 +1630,7 @@ static void kgdb_register_callbacks(void)
  {
         if (!kgdb_io_module_registered) {
                 kgdb_io_module_registered = 1;
+               kgdb_initial_atomic_var();
                 kgdb_arch_init();
  #ifdef CONFIG_MAGIC_SYSRQ
                 register_sysrq_key('g', &sysrq_gdb_op);
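
As an alternative sketch (an assumption on my part, based on how these
variables appear to be defined in kernel/kgdb.c), the counters could also be
initialised statically with ATOMIC_INIT(0) where they are defined, instead of
in a runtime helper:

    static atomic_t passive_cpu_wait[NR_CPUS] = { [0 ... NR_CPUS - 1] = ATOMIC_INIT(0) };
    static atomic_t cpu_in_kgdb[NR_CPUS]      = { [0 ... NR_CPUS - 1] = ATOMIC_INIT(0) };
    static atomic_t kgdb_setting_breakpoint   = ATOMIC_INIT(0);

Static storage is zero-initialised anyway, so this mostly documents the
intent; the runtime helper above is still what resets the state if the I/O
module is unregistered and registered again.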




> I would certainly rather fix kgdb vs mucking with the internals of 
> cpu_relax().
> 
> 
> Jason.
> 
> --- a/kernel/kgdb.c
> +++ b/kernel/kgdb.c
> @@ -580,14 +580,13 @@ static void kgdb_wait(struct pt_regs *re
>        * Make sure the above info reaches the primary CPU before
>        * our cpu_in_kgdb[] flag setting does:
>        */
> -     smp_wmb();
> -     atomic_set(&cpu_in_kgdb[cpu], 1);
> +     atomic_inc(&cpu_in_kgdb[cpu]);
>  
>       /* Disable any cpu specific hw breakpoints */
>       kgdb_disable_hw_debug(regs);
>  
>       /* Wait till primary CPU is done with debugging */
> -     while (atomic_read(&passive_cpu_wait[cpu]))
> +     while (atomic_add_return(0, &passive_cpu_wait[cpu]))
>               cpu_relax();
>  
>       kgdb_info[cpu].debuggerinfo = NULL;
> @@ -598,7 +597,7 @@ static void kgdb_wait(struct pt_regs *re
>               arch_kgdb_ops.correct_hw_break();
>  
>       /* Signal the primary CPU that we are done: */
> -     atomic_set(&cpu_in_kgdb[cpu], 0);
> +     atomic_dec(&cpu_in_kgdb[cpu]);
>       touch_softlockup_watchdog_sync();
>       clocksource_touch_watchdog();
>       local_irq_restore(flags);
> @@ -1493,7 +1492,7 @@ acquirelock:
>        * spin_lock code is good enough as a barrier so we don't
>        * need one here:
>        */
> -     atomic_set(&cpu_in_kgdb[ks->cpu], 1);
> +     atomic_inc(&cpu_in_kgdb[ks->cpu]);
>  
>  #ifdef CONFIG_SMP
>       /* Signal the other CPUs to enter kgdb_wait() */
> @@ -1505,7 +1504,7 @@ acquirelock:
>        * Wait for the other CPUs to be notified and be waiting for us:
>        */
>       for_each_online_cpu(i) {
> -             while (!atomic_read(&cpu_in_kgdb[i]))
> +             while (!atomic_add_return(0, &cpu_in_kgdb[i]))
>                       cpu_relax();
>       }
>  
> @@ -1528,7 +1527,7 @@ acquirelock:
>  
>       kgdb_info[ks->cpu].debuggerinfo = NULL;
>       kgdb_info[ks->cpu].task = NULL;
> -     atomic_set(&cpu_in_kgdb[ks->cpu], 0);
> +     atomic_dec(&cpu_in_kgdb[ks->cpu]);
>  
>       if (!kgdb_single_step) {
>               for (i = NR_CPUS-1; i >= 0; i--)
> @@ -1538,7 +1537,7 @@ acquirelock:
>                * from the debugger.
>                */
>               for_each_online_cpu(i) {
> -                     while (atomic_read(&cpu_in_kgdb[i]))
> +                     while (atomic_add_return(0, &cpu_in_kgdb[i]))
>                               cpu_relax();
>               }
>       }
> @@ -1742,11 +1741,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
>   */
>  void kgdb_breakpoint(void)
>  {
> -     atomic_set(&kgdb_setting_breakpoint, 1);
> +     atomic_inc(&kgdb_setting_breakpoint);
>       wmb(); /* Sync point before breakpoint */
>       arch_kgdb_breakpoint();
>       wmb(); /* Sync point after breakpoint */
> -     atomic_set(&kgdb_setting_breakpoint, 0);
> +     atomic_dec(&kgdb_setting_breakpoint);
>  }
>  EXPORT_SYMBOL_GPL(kgdb_breakpoint);
>  
> 

