> > ...
> >
> > This cannot happen in async mode, since the output would be buffered and
> > printk() never called on behalf of the preempted handler.
> >
> >>
> >> let's say at the (*) point
> >>
> >> void __ipipe_flush_printk (unsigned virq)
> >> {
> >>       char *p = __ipipe_printk_buf;
> >>       int out = 0, len;
> >>
> >>       clear_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags);
> >>
> >>       while (out < __ipipe_printk_fill) {
> >>               len = strlen(p) + 1;
> >>               printk("%s",p);
> >>               p += len;
> >>               out += len;
> >>       }
> >> (*) <---------------------------- preempted
> >>       __ipipe_printk_fill = 0;
> >> }
> >>
> >> When Linux gets control back, the virq handler continues its execution and
> >> resets __ipipe_printk_fill to 0.
> >>
> >> This can be avoided only if virqs are manipulated with the primary
> >> domain being stalled as well. But you said "and under __Linux
> >> domain___ stalling protection in __ipipe_flush_printk since it's a
> >> virq handler".
> >>
> >>
> >>  > and finally, printk() cannot preempt
> >>  > __ipipe_flush_printk under normal operation mode (i.e. async mode).
> >> AFAICS,
> >>  > there's no race here.
>
> Mea culpa, Dmitry is right: in the situation he depicted above, we
> could drop a portion of the output buffer. A way to fix this would be
> to protect a test-and-decrement section on __ipipe_printk_fill in the
> flush handler with a hardware (hw) lock.
>

Something like this (just a draft) would probably work, but I suppose something a bit more graceful could be implemented — actually, with a small optimization of the IPIPE_PRINTK_FLAG handling.

void __ipipe_flush_printk (unsigned virq)
{
      char *p = __ipipe_printk_buf;
-       int out = 0, len;
+       int out = 0, len, used;

...
-       clear_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags);
...

+ spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);
+ used = __ipipe_printk_fill;
+ spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);

+ oncemore:

      while (out < used) {
              len = strlen(p) + 1;
              printk("%s",p);
              p += len;
              out += len;
      }

+ spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);
+ if (__ipipe_printk_fill == used)
+ {
-       __ipipe_printk_fill = 0;

+     used = __ipipe_printk_fill = 0;

+ // when everything is done
+      clear_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags);

+ }
+ else

+ used = __ipipe_printk_fill;
+ spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);

+ if (used)
+ goto oncemore;

...
}

and

asmlinkage int printk(const char *fmt, ...)
{
+ int virq_is_active;
...

      spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);

      fbytes = __LOG_BUF_LEN - __ipipe_printk_fill;

      if (fbytes > 1) {
              r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill,
                             fbytes, fmt, args) + 1; /* account for the null byte */
              __ipipe_printk_fill += r;
      } else
              r = 0;

+ virq_is_active = test_and_set_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags);

      spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);

- if (!test_and_set_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags))
+      if (!virq_is_active)
              ipipe_trigger_irq(__ipipe_printk_virq);
out:
      va_end(args);

      return r;
}

> --
>
> Philippe.

---
Best regards,
Dmitry

_______________________________________________
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core

Reply via email to