On 04.04.2017 21:48, Brian Brooks wrote: > Signed-off-by: Ola Liljedahl <ola.liljed...@arm.com> > Reviewed-by: Brian Brooks <brian.bro...@arm.com> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
> +/****************************************************************************** > + * bitset abstract data type > + > *****************************************************************************/ > +/* This could be a struct of scalars to support larger bit sets */ > + > +#if ATOM_BITSET_SIZE <= 32 Maybe I missed it — where did you set this macro? Also, why do you need several versions of bitset? Can you stick to one size that fits all? > +typedef uint32_t bitset_t; > + > +static inline bitset_t bitset_mask(uint32_t bit) > +{ > + return 1UL << bit; > +} > + > +/* Return first-bit-set with StdC ffs() semantics */ > +static inline uint32_t bitset_ffs(bitset_t b) > +{ > + return __builtin_ffsl(b); > +} > + > +/* Load-exclusive with memory ordering */ > +static inline bitset_t bitset_ldex(bitset_t *bs, int mo) > +{ > + return LDXR32(bs, mo); > +} > + > +#elif ATOM_BITSET_SIZE <= 64 > + > +typedef uint64_t bitset_t; > + > +static inline bitset_t bitset_mask(uint32_t bit) > +{ > + return 1ULL << bit; > +} > + > +/* Return first-bit-set with StdC ffs() semantics */ > +static inline uint32_t bitset_ffs(bitset_t b) > +{ > + return __builtin_ffsll(b); > +} > + > +/* Load-exclusive with memory ordering */ > +static inline bitset_t bitset_ldex(bitset_t *bs, int mo) > +{ > + return LDXR64(bs, mo); > +} > + > +#elif ATOM_BITSET_SIZE <= 128 > + > +#if __SIZEOF_INT128__ == 16 > +typedef unsigned __int128 bitset_t; > + > +static inline bitset_t bitset_mask(uint32_t bit) > +{ > + if (bit < 64) > + return 1ULL << bit; > + else > + return (unsigned __int128)(1ULL << (bit - 64)) << 64; > +} > + > +/* Return first-bit-set with StdC ffs() semantics */ > +static inline uint32_t bitset_ffs(bitset_t b) > +{ > + if ((uint64_t)b != 0) > + return __builtin_ffsll((uint64_t)b); > + else if ((b >> 64) != 0) > + return __builtin_ffsll((uint64_t)(b >> 64)) + 64; > + else > + return 0; > +} > + > +/* Load-exclusive with memory ordering */ > +static inline bitset_t bitset_ldex(bitset_t *bs, int mo) > 
+{ > + return LDXR128(bs, mo); > +} > + > +#else > +#error __int128 not supported by compiler > +#endif > + > +#else > +#error Unsupported size of bit sets (ATOM_BITSET_SIZE) > +#endif > + > +/* Atomic load with memory ordering */ > +static inline bitset_t atom_bitset_load(bitset_t *bs, int mo) > +{ > + return __atomic_load_n(bs, mo); > +} > + > +/* Atomic bit set with memory ordering */ > +static inline void atom_bitset_set(bitset_t *bs, uint32_t bit, int mo) > +{ > + (void)__atomic_fetch_or(bs, bitset_mask(bit), mo); > +} > + > +/* Atomic bit clear with memory ordering */ > +static inline void atom_bitset_clr(bitset_t *bs, uint32_t bit, int mo) > +{ > + (void)__atomic_fetch_and(bs, ~bitset_mask(bit), mo); > +} > + > +/* Atomic exchange with memory ordering */ > +static inline bitset_t atom_bitset_xchg(bitset_t *bs, bitset_t neu, int mo) > +{ > + return __atomic_exchange_n(bs, neu, mo); > +} > + Any real reason for the following defines? Why do you need them? > +/* Return a & ~b */ > +static inline bitset_t bitset_andn(bitset_t a, bitset_t b) > +{ > + return a & ~b; > +} > + > +static inline bool bitset_is_eql(bitset_t a, bitset_t b) > +{ > + return a == b; > +} > + > +static inline bitset_t bitset_clr(bitset_t bs, uint32_t bit) > +{ > + return bs & ~bitset_mask(bit); > +} > + > +static inline bitset_t bitset_set(bitset_t bs, uint32_t bit) > +{ > + return bs | bitset_mask(bit); > +} > + > +static inline bitset_t bitset_null(void) > +{ > + return 0U; > +} > + > +static inline bool bitset_is_null(bitset_t a) > +{ > + return a == 0U; > +} > + > +static inline bool bitset_is_set(bitset_t a, uint32_t bit) > +{ > + return (a & bitset_mask(bit)) != 0; > +} > + > +#endif > -- With best wishes, Dmitry