From: Benjamin Boren <ben.bo...@intel.com>

Signed-off-by: Benjamin Boren <Ben.Boren@intel.com>
Signed-off-by: David Hunt <david.hunt@intel.com>
---
 .../common/include/arch/arm64/rte_atomic.h         | 269 +++++++++++++++++++++
 1 file changed, 269 insertions(+)
 create mode 100644 lib/librte_eal/common/include/arch/arm64/rte_atomic.h

diff --git a/lib/librte_eal/common/include/arch/arm64/rte_atomic.h b/lib/librte_eal/common/include/arch/arm64/rte_atomic.h
new file mode 100644
index 0000000..c9e0dff
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm64/rte_atomic.h
@@ -0,0 +1,269 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_ARM64_H_
+#define _RTE_ATOMIC_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_atomic.h"
+
+/**
+ * @file
+ * Atomic Operations
+ *
+ * This file defines an API for atomic operations.
+ */
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define rte_mb() do { asm volatile("dsb sy" : : : "memory"); } while (0)
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define rte_wmb() do { asm volatile("dsb st" : : : "memory"); } while (0)
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define rte_rmb() do { asm volatile("dsb ld" : : : "memory"); } while (0)
+
+
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+       return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+       rte_atomic16_add(v, 1);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+       rte_atomic16_sub(v, 1);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+       return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+       rte_atomic32_add(v, 1);
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+       rte_atomic32_sub(v, 1);
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+       v->cnt = 0;
+#else
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                       tmp, 0);
+       }
+#endif
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+       return v->cnt;
+#else
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               /* replace the value by itself */
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                       tmp, tmp);
+       }
+       return tmp;
+#endif
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+#ifdef __LP64__
+       v->cnt = new_value;
+#else
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                       tmp, new_value);
+       }
+#endif
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       __sync_fetch_and_add(&v->cnt, inc);
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       rte_atomic64_add(v, 1);
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       rte_atomic64_sub(v, 1);
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       return rte_atomic64_add_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       rte_atomic64_set(v, 0);
+}
+#endif /* RTE_FORCE_INTRINSICS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_ARM64_H_ */
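
A few usage sketches for reviewers follow; they are illustrative only and
not part of the patch. First, the classic producer/consumer pairing the
barrier macros enable (the data/ready/publish/consume names are
hypothetical caller code):

    #include <stdint.h>

    static uint64_t data;
    static volatile int ready;

    /* Producer: publish the payload, then the flag. rte_wmb() (dsb st)
     * orders the data store before the flag store. */
    static void publish(uint64_t value)
    {
            data = value;
            rte_wmb();
            ready = 1;
    }

    /* Consumer: wait for the flag, then read the payload. rte_rmb()
     * (dsb ld) orders the flag load before the data load. */
    static uint64_t consume(void)
    {
            while (!ready)
                    ;
            rte_rmb();
            return data;
    }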
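
Second, a compare-and-swap retry loop built on rte_atomic32_cmpset(),
assuming the rte_atomic32_t type from generic/rte_atomic.h; inc_capped()
is a hypothetical helper, not part of this patch:

    /* Increment v, but never past cap: retry until our CAS wins. */
    static void inc_capped(rte_atomic32_t *v, uint32_t cap)
    {
            uint32_t old;

            do {
                    old = (uint32_t)v->cnt;
                    if (old >= cap)
                            return;
            } while (!rte_atomic32_cmpset((volatile uint32_t *)&v->cnt,
                                    old, old + 1));
    }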
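
On a non-LP64 ABI (the !__LP64__ branches above) a plain 64-bit load or
store may be compiled as two 32-bit accesses and can therefore be torn,
which is why rte_atomic64_read() and rte_atomic64_set() loop on
rte_atomic64_cmpset() instead. A condensed form of the read path (the
helper name is hypothetical):

    /* CAS-ing the current value for itself acts as a single atomic
     * 64-bit load on ABIs where a plain load could be torn. */
    static int64_t atomic64_read_fallback(rte_atomic64_t *v)
    {
            uint64_t tmp;

            do {
                    tmp = v->cnt;
            } while (!rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
                                    tmp, tmp));
            return (int64_t)tmp;
    }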
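
Finally, the dec_and_test() helpers map naturally onto reference
counting; struct obj and obj_put() below are hypothetical caller code:

    #include <stdlib.h>

    struct obj {
            rte_atomic64_t refcnt;
            /* ... payload ... */
    };

    /* Drop one reference; the caller that takes the count to zero
     * frees the object. */
    static void obj_put(struct obj *o)
    {
            if (rte_atomic64_dec_and_test(&o->refcnt))
                    free(o);
    }
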
-- 
2.1.4
