Found when compiling the code with a C++ compiler as part of building
C++ binaries. Most of the issues are due to missing explicit casts:
unlike C, C++ does not implicitly convert 'void *' to other pointer
types, and MSVC's Interlocked* intrinsics operate on 'char', 'short'
and 'long' rather than the fixed-width intN_t types.
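As a minimal, self-contained sketch of the main class of error (the
names 'attr' and 'attr_next' are hypothetical, for illustration only;
this is not code from the patch), the commented-out return statement
below compiles as C but is rejected by a C++ compiler until the cast
is written out:

    /* cast_demo.cpp - hypothetical example, not part of the patch. */
    #include <cstdint>
    #include <cstdlib>

    struct attr {
        uint16_t len;   /* Total length of this attribute, in bytes. */
    };

    static inline struct attr *
    attr_next(const struct attr *a)
    {
        /* Legal C, but a C++ compiler rejects the implicit conversion
         * ("invalid conversion from 'void*' to 'attr*'"):
         *
         * return (void *) ((const uint8_t *) a + a->len);
         */
        return (struct attr *) ((const uint8_t *) a + a->len);
    }

    int main(void)
    {
        struct attr attrs[2] = { { sizeof(struct attr) }, { 0 } };
        /* Stepping past attrs[0] by its own length lands on attrs[1]. */
        return attr_next(&attrs[0]) == &attrs[1] ? EXIT_SUCCESS : EXIT_FAILURE;
    }

The netlink.h, ovs-thread.h, packets.h and socket-util.h hunks below
fix this pattern; the util.h, ovs-atomic-msvc.h and unaligned.h hunks
additionally deal with taking sizeof of an anonymous struct, the
operand types of the Interlocked* intrinsics, and designated
initializers, which C++ before C++20 does not accept.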
Signed-off-by: Sairam Venugopal <[email protected]>
Signed-off-by: Shireesh Kumar Singh <[email protected]>
Co-authored-by: Shireesh Kumar Singh <[email protected]>
---
 include/openvswitch/util.h |  8 +++++++
 lib/netlink.h              |  2 +-
 lib/ovs-atomic-msvc.h      | 57 ++++++++++++++++++++++++++++------------------
 lib/ovs-thread.h           |  4 ++--
 lib/packets.h              | 15 ++++++++----
 lib/socket-util.h          |  4 ++--
 lib/unaligned.h            | 23 +++++++++++++++----
 7 files changed, 77 insertions(+), 36 deletions(-)

diff --git a/include/openvswitch/util.h b/include/openvswitch/util.h
index c3e60d5..a1781e1 100644
--- a/include/openvswitch/util.h
+++ b/include/openvswitch/util.h
@@ -231,6 +231,14 @@ OVS_NO_RETURN void ovs_assert_failure(const char *, const char *, const char *);
         struct { MEMBERS };                                         \
         uint8_t PAD_ID[ROUND_UP(sizeof(struct { MEMBERS }), UNIT)]; \
     }
+#elif defined(WIN32)
+#define PADDED_MEMBERS_CACHELINE_MARKER(UNIT, CACHELINE, MEMBERS)  \
+    union {                                                         \
+        OVS_CACHE_LINE_MARKER CACHELINE;                            \
+        struct { MEMBERS };                                         \
+        struct named_member_##CACHELINE { MEMBERS };                \
+        uint8_t PAD_ID[ROUND_UP(sizeof(struct named_member_##CACHELINE), UNIT)]; \
+    }
 #else
 #define PADDED_MEMBERS_CACHELINE_MARKER(UNIT, CACHELINE, MEMBERS)  \
     union {                                                         \
diff --git a/lib/netlink.h b/lib/netlink.h
index 6dfac27..3bee560 100644
--- a/lib/netlink.h
+++ b/lib/netlink.h
@@ -153,7 +153,7 @@ enum nl_attr_type
 static inline struct nlattr *
 nl_attr_next(const struct nlattr *nla)
 {
-    return (void *) ((uint8_t *) nla + NLA_ALIGN(nla->nla_len));
+    return (struct nlattr *) ((uint8_t *) nla + NLA_ALIGN(nla->nla_len));
 }
 
 static inline bool
diff --git a/lib/ovs-atomic-msvc.h b/lib/ovs-atomic-msvc.h
index 81f7682..5bc0246 100644
--- a/lib/ovs-atomic-msvc.h
+++ b/lib/ovs-atomic-msvc.h
@@ -98,8 +98,8 @@ atomic_signal_fence(memory_order order)
 
 #define atomic_store32(DST, SRC, ORDER)                                 \
     if (ORDER == memory_order_seq_cst) {                                \
-        InterlockedExchange((int32_t volatile *) (DST),                 \
-                            (int32_t) (SRC));                           \
+        InterlockedExchange((long volatile *) (DST),                    \
+                            (long) (SRC));                              \
     } else {                                                            \
         *(DST) = (SRC);                                                 \
     }
@@ -128,13 +128,18 @@ atomic_signal_fence(memory_order order)
     atomic_storeX(64, DST, SRC, ORDER)
 #endif
 
-/* Used for 8 and 16 bit variations. */
-#define atomic_storeX(X, DST, SRC, ORDER)                               \
-    if (ORDER == memory_order_seq_cst) {                                \
-        InterlockedExchange##X((int##X##_t volatile *) (DST),           \
-                               (int##X##_t) (SRC));                     \
-    } else {                                                            \
-        *(DST) = (SRC);                                                 \
+#define atomic_store8(DST, SRC, ORDER)                                  \
+    if (ORDER == memory_order_seq_cst) {                                \
+        InterlockedExchange8((char volatile *) (DST), (char) (SRC));    \
+    } else {                                                            \
+        *(DST) = (SRC);                                                 \
+    }
+
+#define atomic_store16(DST, SRC, ORDER)                                 \
+    if (ORDER == memory_order_seq_cst) {                                \
+        InterlockedExchange16((short volatile *) (DST), (short) (SRC)); \
+    } else {                                                            \
+        *(DST) = (SRC);                                                 \
     }
 
 #define atomic_store(DST, SRC) \
@@ -142,9 +147,9 @@ atomic_signal_fence(memory_order order)
 
 #define atomic_store_explicit(DST, SRC, ORDER)                          \
     if (sizeof *(DST) == 1) {                                           \
-        atomic_storeX(8, DST, SRC, ORDER)                               \
+        atomic_store8(DST, SRC, ORDER)                                  \
     } else if (sizeof *(DST) == 2) {                                    \
-        atomic_storeX(16, DST, SRC, ORDER)                              \
+        atomic_store16(DST, SRC, ORDER)                                 \
     } else if (sizeof *(DST) == 4) {                                    \
         atomic_store32(DST, SRC, ORDER)                                 \
     } else if (sizeof *(DST) == 8) {                                    \
@@ -209,27 +214,33 @@ atomic_signal_fence(memory_order order)
 
 /* Arithmetic addition calls. */
 
+#define atomic_add8(RMW, ARG, ORIG, ORDER)                         \
+    *(ORIG) = _InterlockedExchangeAdd8((char volatile *) (RMW),    \
+                                       (char) (ARG));
+
+#define atomic_add16(RMW, ARG, ORIG, ORDER)                        \
+    *(ORIG) = _InterlockedExchangeAdd16((short volatile *) (RMW),  \
+                                        (short) (ARG));
+
 #define atomic_add32(RMW, ARG, ORIG, ORDER)                        \
     *(ORIG) = InterlockedExchangeAdd((int32_t volatile *) (RMW),   \
                                      (int32_t) (ARG));
-
-/* For 8, 16 and 64 bit variations. */
-#define atomic_add_generic(X, RMW, ARG, ORIG, ORDER)                    \
-    *(ORIG) = _InterlockedExchangeAdd##X((int##X##_t volatile *) (RMW), \
-                                         (int##X##_t) (ARG));
+#define atomic_add64(RMW, ARG, ORIG, ORDER)                         \
+    *(ORIG) = _InterlockedExchangeAdd64((int64_t volatile *) (RMW), \
+                                        (int64_t) (ARG));
 
 #define atomic_add(RMW, ARG, ORIG)                                     \
     atomic_add_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_add_explicit(RMW, ARG, ORIG, ORDER)                     \
     if (sizeof *(RMW) == 1) {                                          \
-        atomic_op(add, 8, RMW, ARG, ORIG, ORDER)                       \
+        atomic_add8(RMW, ARG, ORIG, ORDER)                             \
     } else if (sizeof *(RMW) == 2) {                                   \
-        atomic_op(add, 16, RMW, ARG, ORIG, ORDER)                      \
+        atomic_add16(RMW, ARG, ORIG, ORDER)                            \
     } else if (sizeof *(RMW) == 4) {                                   \
         atomic_add32(RMW, ARG, ORIG, ORDER)                            \
     } else if (sizeof *(RMW) == 8) {                                   \
-        atomic_op(add, 64, RMW, ARG, ORIG, ORDER)                      \
+        atomic_add64(RMW, ARG, ORIG, ORDER)                            \
     } else {                                                           \
         abort();                                                       \
     }
@@ -335,7 +346,8 @@ atomic_signal_fence(memory_order order)
 static inline bool
 atomic_compare_exchange8(int8_t volatile *dst, int8_t *expected, int8_t src)
 {
-    int8_t previous = _InterlockedCompareExchange8(dst, src, *expected);
+    int8_t previous = _InterlockedCompareExchange8((char volatile *)dst,
+                                                   src, *expected);
     if (previous == *expected) {
         return true;
     } else {
@@ -361,7 +373,8 @@ static inline bool
 atomic_compare_exchange32(int32_t volatile *dst, int32_t *expected,
                           int32_t src)
 {
-    int32_t previous = InterlockedCompareExchange(dst, src, *expected);
+    int32_t previous = InterlockedCompareExchange((long volatile *)dst,
+                                                  src, *expected);
     if (previous == *expected) {
         return true;
     } else {
@@ -405,7 +418,7 @@ atomic_compare_unreachable()
         : ovs_fatal(0, "atomic operation with size greater than 8 bytes"), \
           atomic_compare_unreachable()))))
 
-
+
 /* atomic_flag */
 
 typedef ATOMIC(int32_t) atomic_flag;
diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
index 55e51a4..03fd804 100644
--- a/lib/ovs-thread.h
+++ b/lib/ovs-thread.h
@@ -260,7 +260,7 @@ void xpthread_join(pthread_t, void **);
     static inline NAME##_type *                                         \
     NAME##_get_unsafe(void)                                             \
     {                                                                   \
-        return &NAME##_var;                                             \
+        return (NAME##_type *)&NAME##_var;                              \
     }                                                                   \
                                                                         \
     static inline NAME##_type *                                         \
@@ -316,7 +316,7 @@ void xpthread_join(pthread_t, void **);
     static inline NAME##_type *                                         \
     NAME##_get_unsafe(void)                                             \
    {                                                                    \
-        return pthread_getspecific(NAME##_key);                         \
+        return (NAME##_type *)pthread_getspecific(NAME##_key);          \
     }                                                                   \
                                                                         \
     NAME##_type *NAME##_get(void);
diff --git a/lib/packets.h b/lib/packets.h
index 461f488..8e1c51c 100644
--- a/lib/packets.h
+++ b/lib/packets.h
@@ -1123,7 +1123,8 @@ in6_addr_set_mapped_ipv4(struct in6_addr *ip6, ovs_be32 ip4)
 static inline ovs_be32
 in6_addr_get_mapped_ipv4(const struct in6_addr *addr)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) addr;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) addr;
     if (IN6_IS_ADDR_V4MAPPED(addr)) {
         return get_16aligned_be32(&taddr->be32[3]);
     } else {
@@ -1134,7 +1135,8 @@ in6_addr_get_mapped_ipv4(const struct in6_addr *addr)
 static inline void
 in6_addr_solicited_node(struct in6_addr *addr, const struct in6_addr *ip6)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) addr;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) addr;
     memset(taddr->be16, 0, sizeof(taddr->be16));
     taddr->be16[0] = htons(0xff02);
     taddr->be16[5] = htons(0x1);
@@ -1150,8 +1152,10 @@ static inline void
 in6_generate_eui64(struct eth_addr ea, struct in6_addr *prefix,
                    struct in6_addr *lla)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) lla;
-    union ovs_16aligned_in6_addr *prefix_taddr = (void *) prefix;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) lla;
+    union ovs_16aligned_in6_addr *prefix_taddr =
+        (union ovs_16aligned_in6_addr *) prefix;
     taddr->be16[0] = prefix_taddr->be16[0];
     taddr->be16[1] = prefix_taddr->be16[1];
     taddr->be16[2] = prefix_taddr->be16[2];
@@ -1169,7 +1173,8 @@ in6_generate_eui64(struct eth_addr ea, struct in6_addr *prefix,
 static inline void
 in6_generate_lla(struct eth_addr ea, struct in6_addr *lla)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) lla;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) lla;
     memset(taddr->be16, 0, sizeof(taddr->be16));
     taddr->be16[0] = htons(0xfe80);
     taddr->be16[4] = htons(((ea.ea[0] ^ 0x02) << 8) | ea.ea[1]);
diff --git a/lib/socket-util.h b/lib/socket-util.h
index 873a59a..47bd097 100644
--- a/lib/socket-util.h
+++ b/lib/socket-util.h
@@ -138,7 +138,7 @@ static inline int make_unix_socket(int style, bool nonblock,
 static inline int rpl_setsockopt(int sock, int level, int optname,
                                  const void *optval, socklen_t optlen)
 {
-    return (setsockopt)(sock, level, optname, optval, optlen);
+    return (setsockopt)(sock, level, optname, (const char *)optval, optlen);
 }
 
 #define getsockopt(sock, level, optname, optval, optlen) \
@@ -146,7 +146,7 @@ static inline int rpl_getsockopt(int sock, int level, int optname,
                                  void *optval, socklen_t *optlen)
 {
-    return (getsockopt)(sock, level, optname, optval, optlen);
+    return (getsockopt)(sock, level, optname, (char *)optval, optlen);
 }
 
 #endif
diff --git a/lib/unaligned.h b/lib/unaligned.h
index a150d7d..1e2dcd8 100644
--- a/lib/unaligned.h
+++ b/lib/unaligned.h
@@ -52,11 +52,19 @@ static inline void put_unaligned_be64(ovs_be64 *, ovs_be64);
  * Below, "sizeof (*(P) % 1)" verifies that *P has an integer type, since
  * operands to % must be integers. */
+#if defined(__cplusplus) && defined(WIN32)
+#define get_unaligned_u64(P)                                    \
+    ([P]{BUILD_ASSERT(sizeof *(P) == 8);},                      \
+     BUILD_ASSERT_GCCONLY(!TYPE_IS_SIGNED(typeof(*(P)))),       \
+     (void) sizeof (*(P) % 1),                                  \
+     get_unaligned_u64__((const uint64_t *) (P)))
+#else
 #define get_unaligned_u64(P)                                    \
     (BUILD_ASSERT(sizeof *(P) == 8),                            \
      BUILD_ASSERT_GCCONLY(!TYPE_IS_SIGNED(typeof(*(P)))),       \
      (void) sizeof (*(P) % 1),                                  \
      get_unaligned_u64__((const uint64_t *) (P)))
+#endif
 
 #ifdef __GNUC__
 /* GCC implementations. */
@@ -185,7 +193,7 @@ put_unaligned_u64(uint64_t *p, uint64_t x)
 {
     put_unaligned_u64__(p, x);
 }
-
+
 /* Returns the value in 'x'. */
 static inline uint32_t
 get_16aligned_u32(const ovs_16aligned_u32 *x)
@@ -220,7 +228,11 @@ put_32aligned_u64(ovs_32aligned_u64 *x, uint64_t value)
 static inline ovs_u128
 get_32aligned_u128(const ovs_32aligned_u128 *x)
 {
-    ovs_u128 u = { .u32 = { x->u32[0], x->u32[1], x->u32[2], x->u32[3] } };
+    ovs_u128 u;
+    u.u32[0] = x->u32[0];
+    u.u32[1] = x->u32[1];
+    u.u32[2] = x->u32[2];
+    u.u32[3] = x->u32[3];
     return u;
 }
 
@@ -287,8 +299,11 @@ put_32aligned_be64(ovs_32aligned_be64 *x, ovs_be64 value)
 static inline ovs_be128
 get_32aligned_be128(const ovs_32aligned_be128 *x)
 {
-    ovs_be128 u = { .be32 = { x->be32[0], x->be32[1],
-                              x->be32[2], x->be32[3] } };
+    ovs_be128 u;
+    u.be32[0] = x->be32[0];
+    u.be32[1] = x->be32[1];
+    u.be32[2] = x->be32[2];
+    u.be32[3] = x->be32[3];
     return u;
 }
-- 
2.9.0.windows.1
