Signed-off-by: WeiY <wei.a.y...@gmail.com> --- cpukit/score/include/rtems/score/atomic.h | 465 ++++++++++++++++------------- 1 file changed, 251 insertions(+), 214 deletions(-)
diff --git a/cpukit/score/include/rtems/score/atomic.h b/cpukit/score/include/rtems/score/atomic.h index 3b62cb1..3378564 100644 --- a/cpukit/score/include/rtems/score/atomic.h +++ b/cpukit/score/include/rtems/score/atomic.h @@ -8,7 +8,7 @@ */ /* - * COPYRIGHT (c) 2012 Deng Hengyi. + * COPYRIGHT (c) 2013 Deng Hengyi. * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -32,241 +32,278 @@ extern "C" { /**@{*/ /** - * @brief the enumeration Atomic_Memory_barrier specifies the detailed regular - * memory synchronization operations used in the atomic operation API - * definitions. + * @brief Atomically load an atomic type value from object @a object with + * a type of Atomic_Order @a order. The @a order shall not be + * ATOMIC_ORDER_RELEASE. */ -typedef enum { - /** no operation orders memory. */ - ATOMIC_RELAXED_BARRIER, - /** a load operation performs an acquire operation on the affected memory - * location. This flag guarantees that the effects of load operation are - * completed before the effects of any later data accesses. - */ - ATOMIC_ACQUIRE_BARRIER, - /** a store operation performs a release operation on the affected memory - * location. This flag guarantee that all effects of all previous data - * accesses are completed before the store operation takes place. 
- */ - ATOMIC_RELEASE_BARRIER -} Atomic_Memory_barrier; +RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Load_uint( + volatile Atomic_Uint *object, + Atomic_Order order +) +{ + return _CPU_Atomic_Load_uint(object, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Load_ptr( + volatile Atomic_Pointer *object, + Atomic_Order order +) +{ + return _CPU_Atomic_Load_ptr(object, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Load_flag( + volatile Atomic_Flag *object, + Atomic_Order order +) +{ + return _CPU_Atomic_Load_flag(object, order); +} /** - * @brief Atomically load an atomic type value from address @a address with - * a type of Atomic_Memory_barrier @a memory_barrier. The @a memory_barrier - * shall not be ATOMIC_RELEASE_BARRIER. + * @brief Atomically store an atomic type value @a value into object @a + * object with a type of Atomic_Order @a order. The @a + * order shall not be ATOMIC_ORDER_ACQUIRE. */ -RTEMS_INLINE_ROUTINE Atomic_Int _Atomic_Load_int( - volatile Atomic_Int *address, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE Atomic_Long _Atomic_Load_long( - volatile Atomic_Long *address, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE Atomic_Pointer _Atomic_Load_ptr( - volatile Atomic_Pointer *address, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE Atomic_Int32 _Atomic_Load_32( - volatile Atomic_Int32 *address, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE Atomic_Int64 _Atomic_Load_64( - volatile Atomic_Int64 *address, - Atomic_Memory_barrier memory_barrier -); +RTEMS_INLINE_ROUTINE void Atomic_Store_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + _CPU_Atomic_Store_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE void Atomic_Store_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + _CPU_Atomic_Store_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE void Atomic_Store_flag( + volatile Atomic_Flag *object, + bool 
value, + Atomic_Order order +) +{ + _CPU_Atomic_Store_flag(object, value, order); +} /** - * @brief Atomically store an atomic type value @a value into address @a - * address with a type of Atomic_Memory_barrier @a memory_barrier. The @a - * memory_barrier shall not be ATOMIC_ACQUIRE_BARRIER. + * @brief Atomically load-add-store an atomic type value @a value into object + * @a object with a type of Atomic_Order @a order. Return the value of @a object + * before atomic operation. */ -RTEMS_INLINE_ROUTINE void _Atomic_Store_int( - volatile Atomic_Int *address, - Atomic_Int value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Store_long( - volatile Atomic_Long *address, - Atomic_Long value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Store_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Store_32( - volatile Atomic_Int32 *address, - Atomic_Int32 value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Store_64( - volatile Atomic_Int64 *address, - Atomic_Int64 value, - Atomic_Memory_barrier memory_barrier -); +RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Fetch_add_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_add_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Fetch_add_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_add_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Fetch_add_flag( + volatile Atomic_Flag *object, + bool value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_add_flag(object, value, order); +} /** - * @brief Atomically load-add-store an atomic type value @a value into address - * @a address with a type of Atomic_Memory_barrier @a memory_barrier. 
+ * @brief Atomically load-sub-store an atomic type value @a value into object + * @a object with a type of Atomic_Order @a order. Return the value of @a object + * before atomic operation. */ -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_add_int( - volatile Atomic_Int *address, - Atomic_Int value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_add_long( - volatile Atomic_Long *address, - Atomic_Long value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_add_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_add_32( - volatile Atomic_Int32 *address, - Atomic_Int32 value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_add_64( - volatile Atomic_Int64 *address, - Atomic_Int64 value, - Atomic_Memory_barrier memory_barrier -); +RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Fetch_sub_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_sub_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Fetch_sub_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_sub_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Fetch_sub_flag( + volatile Atomic_Flag *object, + bool value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_sub_flag(object, value, order); +} /** - * @brief Atomically load-sub-store an atomic type value @a value into address - * @a address with a type of Atomic_Memory_barrier @a memory_barrier. + * @brief Atomically load-or-store an atomic type value @a value into object + * @a object with a type of Atomic_Order @a order. Return the value of @a object + * before atomic operation. 
*/ -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_sub_int( - volatile Atomic_Int *address, - Atomic_Int value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_sub_long( - volatile Atomic_Long *address, - Atomic_Long value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_sub_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_sub_32( - volatile Atomic_Int32 *address, - Atomic_Int32 value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_sub_64( - volatile Atomic_Int64 *address, - Atomic_Int64 value, - Atomic_Memory_barrier memory_barrier -); +RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Fetch_or_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_or_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Fetch_or_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_or_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Fetch_or_flag( + volatile Atomic_Flag *object, + bool value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_or_flag(object, value, order); +} /** - * @brief Atomically load-or-store an atomic type value @a value into address - * @a address with a type of Atomic_Memory_barrier @a memory_barrier. + * @brief Atomically load-and-store an atomic type value @a value into object + * @a object with a type of Atomic_Order @a order. Return the value of @a object + * before atomic operation. 
*/ -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_or_int( - volatile Atomic_Int *address, - Atomic_Int value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_or_long( - volatile Atomic_Long *address, - Atomic_Long value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_or_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_or_32( - volatile Atomic_Int32 *address, - Atomic_Int32 value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_or_64( - volatile Atomic_Int64 *address, - Atomic_Int64 value, - Atomic_Memory_barrier memory_barrier -); + RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Fetch_and_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_and_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Fetch_and_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_and_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Fetch_and_flag( + volatile Atomic_Flag *object, + bool value, + Atomic_Order order +) +{ + return _CPU_Atomic_Fetch_and_flag(object, value, order); +} /** - * @brief Atomically load-and-store an atomic type value @a value into address - * @a address with a type of Atomic_Memory_barrier @a memory_barrier. 
- */ -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_and_int( - volatile Atomic_Int *address, - Atomic_Int value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_and_long( - volatile Atomic_Long *address, - Atomic_Long value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_and_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_and_32( - volatile Atomic_Int32 *address, - Atomic_Int32 value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE void _Atomic_Fetch_and_64( - volatile Atomic_Int64 *address, - Atomic_Int64 value, - Atomic_Memory_barrier memory_barrier -); +* @brief Atomically exchange the value of @a +* object with @a value. Returns the value before exchange. +* The operation uses a type of Atomic_Order @a order. +*/ +RTEMS_INLINE_ROUTINE uint_fast32_t Atomic_Exchange_uint( + volatile Atomic_Uint *object, + uint_fast32_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Exchange_uint(object, value, order); +} +RTEMS_INLINE_ROUTINE uintptr_t Atomic_Exchange_ptr( + volatile Atomic_Pointer *object, + uintptr_t value, + Atomic_Order order +) +{ + return _CPU_Atomic_Exchange_ptr(object, value, order); +} +RTEMS_INLINE_ROUTINE bool Atomic_Exchange_flag( + volatile Atomic_Flag *object, + bool value, + Atomic_Order order +) +{ + return _CPU_Atomic_Exchange_flag(object, value, order); +} /** - * @brief Atomically compare the value stored at @a address with @a - * old_value and if the two values are equal, update the value of @a - * address with @a new_value. Returns zero if the compare failed, - * nonzero otherwise. The operation uses a type of Atomic_Memory_barrier - * @a memory_barrier. + * @brief Atomically compare the value stored at @a object with @a + * old_value and if the two values are equal, update the value of @a + * address with @a new_value. 
Returns zero if the compare failed, + * nonzero otherwise. The operation uses a type of Atomic_Order + * @a order_succ for successful order and a type of Atomic_Order + * @a order_fail for failed order. */ -RTEMS_INLINE_ROUTINE int _Atomic_Compare_exchange_int( - volatile Atomic_Int *address, - Atomic_Int old_value, - Atomic_Int new_value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE int _Atomic_Compare_exchange_long( - volatile Atomic_Long *address, - Atomic_Long old_value, - Atomic_Long new_value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE int _Atomic_Compare_exchange_ptr( - volatile Atomic_Pointer *address, - Atomic_Pointer old_value, - Atomic_Pointer new_value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE int _Atomic_Compare_exchange_32( - volatile Atomic_Int32 *address, - Atomic_Int32 old_value, - Atomic_Int32 new_value, - Atomic_Memory_barrier memory_barrier -); -RTEMS_INLINE_ROUTINE int _Atomic_Compare_exchange_64( - volatile Atomic_Int64 *address, - Atomic_Int64 old_value, - Atomic_Int64 new_value, - Atomic_Memory_barrier memory_barrier -); +RTEMS_INLINE_ROUTINE bool Atomic_Compare_exchange_uint( + volatile Atomic_Uint *object, + uint_fast32_t *old_value, + uint_fast32_t new_value, + Atomic_Order order_succ, + Atomic_Order order_fail +) +{ + return _CPU_Atomic_Compare_exchange_uint(object, old_value, new_value, + order_succ, order_fail); +} +RTEMS_INLINE_ROUTINE bool Atomic_Compare_exchange_ptr( + volatile Atomic_Pointer *object, + uintptr_t *old_value, + uintptr_t new_value, + Atomic_Order order_succ, + Atomic_Order order_fail +) +{ + return _CPU_Atomic_Compare_exchange_ptr(object, old_value, new_value, + order_succ, order_fail); +} +RTEMS_INLINE_ROUTINE bool Atomic_Compare_exchange_flag( + volatile Atomic_Flag *object, + bool *old_value, + bool new_value, + Atomic_Order order_succ, + Atomic_Order order_fail +) +{ + return _CPU_Atomic_Compare_exchange_flag(object, old_value, new_value, + 
order_succ, order_fail); +} -#include <rtems/score/atomic.inl> +/** +* @brief Atomically clear the value of an atomic flag type object @a +* with a type of Atomic_Order @a order. +*/ +RTEMS_INLINE_ROUTINE void Atomic_Clear_flag( + volatile Atomic_Flag *object, + Atomic_Order order +) +{ + _CPU_Atomic_Clear_flag(object, order); +} + +/** +* @brief Atomically test and clear the value of an atomic flag type +* object @a with a type of Atomic_Order @a order. +* if it is successful it will reture ture otherwise false. +*/ +RTEMS_INLINE_ROUTINE bool Atomic_Test_set_flag( + volatile Atomic_Flag *object, + Atomic_Order order +) +{ + return _CPU_Atomic_Test_set_flag(object, order); +} #ifdef __cplusplus } -- 1.7.9.5 _______________________________________________ rtems-devel mailing list rtems-devel@rtems.org http://www.rtems.org/mailman/listinfo/rtems-devel