Hi David,
> -----Original Message-----
> From: David Marchand <david.march...@redhat.com>
> Sent: Monday, October 28, 2019 4:50 AM
> To: Gavin Hu (Arm Technology China) <gavin...@arm.com>
> Cc: dev <dev@dpdk.org>; nd <n...@arm.com>; Ananyev, Konstantin
> <konstantin.anan...@intel.com>; tho...@monjalon.net; Stephen
> Hemminger <step...@networkplumber.org>; hemant.agra...@nxp.com;
> jer...@marvell.com; Pavan Nikhilesh <pbhagavat...@marvell.com>;
> Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>; Ruifeng Wang
> (Arm Technology China) <ruifeng.w...@arm.com>; Phil Yang (Arm
> Technology China) <phil.y...@arm.com>; Steve Capper
> <steve.cap...@arm.com>
> Subject: Re: [PATCH v11 2/5] eal: add the APIs to wait until equal
> 
> On Sun, Oct 27, 2019 at 1:53 PM Gavin Hu <gavin...@arm.com> wrote:
> 
> [snip]
> 
> > diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > index 93895d3..1680d7a 100644
> > --- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > +++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> 
> [snip]
> 
> > @@ -17,6 +23,188 @@ static inline void rte_pause(void)
> >         asm volatile("yield" ::: "memory");
> >  }
> >
> > +/**
> > + * Send an event to quit WFE.
> > + */
> > +static inline void rte_sevl(void);
> > +
> > +/**
> > + * Put processor into low power WFE(Wait For Event) state
> > + */
> > +static inline void rte_wfe(void);
> > +
> > +#ifdef RTE_ARM_USE_WFE
> > +static inline void rte_sevl(void)
> > +{
> > +       asm volatile("sevl" : : : "memory");
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +       asm volatile("wfe" : : : "memory");
> > +}
> > +#else
> > +static inline void rte_sevl(void)
> > +{
> > +}
> > +static inline void rte_wfe(void)
> > +{
> > +       rte_pause();
> > +}
> > +#endif
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> 
> experimental?
> Just complaining on the principle, you missed the __rte_experimental
> in such a case.
> But this API is a no go for me, see below.
Got it, thanks!
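Just so I apply it correctly in the next revision: if a declaration keeps the EXPERIMENTAL doxygen note, my understanding is the tag goes on the declaration itself, e.g. (a sketch only, and moot here since this helper will become internal as discussed below; exact tag placement as checkpatch expects):

/**
 * @warning
 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
 */
__rte_experimental
static __rte_always_inline uint16_t
rte_atomic_load_ex_16(volatile uint16_t *addr, int memorder);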
> 
> > + *
> > + * Atomic exclusive load from addr. It returns the 16-bit content of *addr
> > + * while making it 'monitored'; when it is written by someone else, the
> > + * 'monitored' state is cleared and an event is generated implicitly to exit
> > + * WFE.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11 standard
> > + *  or the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +rte_atomic_load_ex_16(volatile uint16_t *addr, int memorder);
> 
> This API does not make sense for anything but arm, so this prefix is not good.
Yes, we can change it back to __atomic_load_ex_16?
> 
> On arm, when RTE_ARM_USE_WFE is undefined, why would you need it?
> A non exclusive load is enough since you don't want to use wfe.
We can move it inside the #ifdef RTE_ARM_USE_WFE ... #endif block, along the lines of the sketch below.
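Something like this, as a rough sketch on my side (only the 16-bit variant shown; __atomic_load_ex_16 is just the placeholder name from above, final naming TBD, and it relies on the existing includes of rte_pause_64.h):

#ifdef RTE_ARM_USE_WFE
/* Internal helper: exclusive load, which also arms the monitor so that
 * a later write to *addr generates the event that wakes up WFE.
 */
static __rte_always_inline uint16_t
__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
{
        uint16_t tmp;

        /* Only acquire and relaxed orderings are meaningful here. */
        assert((memorder == __ATOMIC_ACQUIRE)
                        || (memorder == __ATOMIC_RELAXED));
        if (memorder == __ATOMIC_ACQUIRE)
                asm volatile("ldaxrh %w[tmp], [%x[addr]]"
                        : [tmp] "=&r" (tmp)
                        : [addr] "r"(addr)
                        : "memory");
        else if (memorder == __ATOMIC_RELAXED)
                asm volatile("ldxrh %w[tmp], [%x[addr]]"
                        : [tmp] "=&r" (tmp)
                        : [addr] "r"(addr)
                        : "memory");
        return tmp;
}
#endif /* RTE_ARM_USE_WFE */

That keeps the body identical to the patch, only renamed and guarded so nothing is exposed when WFE is not enabled.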
> [snip]
> 
> > +
> > +static __rte_always_inline uint16_t
> > +rte_atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +       uint16_t tmp;
> > +       assert((memorder == __ATOMIC_ACQUIRE)
> > +                       || (memorder == __ATOMIC_RELAXED));
> > +       if (memorder == __ATOMIC_ACQUIRE)
> > +               asm volatile("ldaxrh %w[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       else if (memorder == __ATOMIC_RELAXED)
> > +               asm volatile("ldxrh %w[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +rte_atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +       uint32_t tmp;
> > +       assert((memorder == __ATOMIC_ACQUIRE)
> > +                       || (memorder == __ATOMIC_RELAXED));
> > +       if (memorder == __ATOMIC_ACQUIRE)
> > +               asm volatile("ldaxr %w[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       else if (memorder == __ATOMIC_RELAXED)
> > +               asm volatile("ldxr %w[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +rte_atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +       uint64_t tmp;
> > +       assert((memorder == __ATOMIC_ACQUIRE)
> > +                       || (memorder == __ATOMIC_RELAXED));
> > +       if (memorder == __ATOMIC_ACQUIRE)
> > +               asm volatile("ldaxr %x[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       else if (memorder == __ATOMIC_RELAXED)
> > +               asm volatile("ldxr %x[tmp], [%x[addr]]"
> > +                       : [tmp] "=&r" (tmp)
> > +                       : [addr] "r"(addr)
> > +                       : "memory");
> > +       return tmp;
> > +}
> > +
> > +#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +               int memorder)
> > +{
> > +       if (__atomic_load_n(addr, memorder) != expected) {
> > +               rte_sevl();
> > +               do {
> > +                       rte_wfe();
> 
> 
> We are in the RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED case.
> rte_wfe() is always asm volatile("wfe" : : : "memory");
> 
> 
> > +               } while (rte_atomic_load_ex_16(addr, memorder) != expected);
> > +       }
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +               int memorder)
> > +{
> > +       if (__atomic_load_n(addr, memorder) != expected) {
> > +               rte_sevl();
> > +               do {
> > +                       rte_wfe();
> > +               } while (__atomic_load_n(addr, memorder) != expected);
> > +       }
> > +}
> 
> The while() should be with an exclusive load.
Sorry, that is an obvious mistake on my side.
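It will be fixed in line with the 16-bit variant, roughly (a sketch, using the internal __atomic_load_ex_32 name following the rename discussed above):

static __rte_always_inline void
rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
                int memorder)
{
        if (__atomic_load_n(addr, memorder) != expected) {
                rte_sevl();
                do {
                        rte_wfe();
                        /* The exclusive load re-arms the monitor, so the
                         * next write to *addr generates the event that
                         * exits WFE.
                         */
                } while (__atomic_load_ex_32(addr, memorder) != expected);
        }
}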
> 
> 
> I will submit a v12 with those comments addressed so that we move
> forward for rc2.
> But it won't make it in rc1, sorry.
I can submit the v12 myself if you prefer; otherwise, thanks for taking it!
> 
> 
> --
> David Marchand
