This is an automated email from the ASF dual-hosted git repository.
wwbmmm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/brpc.git
The following commit(s) were added to refs/heads/master by this push:
new c727dd77 Use ManualConstructor(AlignedMemory) instead of
std::aligned_storage which has been deprecated in C++23 (#2719)
c727dd77 is described below
commit c727dd77a26aed921b8d7b5fdcb83c22d6f62a02
Author: Bright Chen <[email protected]>
AuthorDate: Thu Sep 26 10:43:19 2024 +0800
Use ManualConstructor(AlignedMemory) instead of std::aligned_storage which
has been deprecated in C++23 (#2719)
---
src/butil/compiler_specific.h | 17 +++++++
src/butil/containers/flat_map.h | 23 +++++----
src/butil/containers/mpsc_queue.h | 10 ++--
src/butil/memory/aligned_memory.h | 46 +++++++++++-------
src/butil/memory/manual_constructor.h | 5 ++
src/butil/object_pool_inl.h | 17 ++++---
src/butil/resource_pool_inl.h | 91 ++++++++++++++++++-----------------
7 files changed, 128 insertions(+), 81 deletions(-)
diff --git a/src/butil/compiler_specific.h b/src/butil/compiler_specific.h
index d05da9dc..9eb4283e 100644
--- a/src/butil/compiler_specific.h
+++ b/src/butil/compiler_specific.h
@@ -124,6 +124,23 @@
// Use like:
// class ALIGNAS(16) MyClass { ... }
// ALIGNAS(16) int array[4];
+//
+// In most places you can use the C++11 keyword "alignas", which is preferred.
+//
+// But compilers have trouble mixing __attribute__((...)) syntax with
+// alignas(...) syntax.
+//
+// Doesn't work in clang or gcc:
+// struct alignas(16) __attribute__((packed)) S { char c; };
+// Works in clang but not gcc:
+// struct __attribute__((packed)) alignas(16) S2 { char c; };
+// Works in clang and gcc:
+// struct alignas(16) S3 { char c; } __attribute__((packed));
+//
+// There are also some attributes that must be specified *before* a class
+// definition: visibility (used for exporting functions/classes) is one of
+// these attributes. This means that it is not possible to use alignas() with a
+// class that is marked as exported.
#if defined(COMPILER_MSVC)
# define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
#elif defined(COMPILER_GCC)
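For illustration only (not part of this commit), a minimal sketch of using the ALIGNAS macro documented above; PacketHeader and rx_ring are hypothetical names:

    #include <cstdint>
    #include "butil/compiler_specific.h"   // ALIGNAS

    class ALIGNAS(16) PacketHeader {
    public:
        uint32_t length;
        uint32_t checksum;
    };

    ALIGNAS(64) static char rx_ring[4096];   // e.g. a cache-line-aligned buffer

    static_assert(alignof(PacketHeader) >= 16, "ALIGNAS(16) raises the alignment");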
diff --git a/src/butil/containers/flat_map.h b/src/butil/containers/flat_map.h
index 40c44c59..955753d2 100644
--- a/src/butil/containers/flat_map.h
+++ b/src/butil/containers/flat_map.h
@@ -105,6 +105,7 @@
#include "butil/bit_array.h" // bit_array_*
#include "butil/strings/string_piece.h" // StringPiece
#include "butil/memory/scope_guard.h"
+#include "butil/memory/manual_constructor.h"
namespace butil {
@@ -265,24 +266,26 @@ public:
BucketInfo bucket_info() const;
struct Bucket {
- explicit Bucket(const _K& k) : next(NULL)
- { new (&element_spaces) Element(k); }
- Bucket(const Bucket& other) : next(NULL)
- { new (&element_spaces) Element(other.element()); }
+ explicit Bucket(const _K& k) : next(NULL) {
+ element_.Init(k);
+ }
+ Bucket(const Bucket& other) : next(NULL) {
+ element_.Init(other.element());
+ }
+
bool is_valid() const { return next != (const Bucket*)-1UL; }
void set_invalid() { next = (Bucket*)-1UL; }
// NOTE: Only be called when is_valid() is true.
Element& element() {
- void* spaces = &element_spaces; // Suppress strict-aliasing
- return *reinterpret_cast<Element*>(spaces);
+ return *element_;
}
const Element& element() const {
- const void* spaces = &element_spaces;
- return *reinterpret_cast<const Element*>(spaces);
+ return *element_;
}
Bucket *next;
- typename std::aligned_storage<sizeof(Element), alignof(Element)>::type
- element_spaces;
+
+ private:
+ ManualConstructor<Element> element_;
};
allocator_type& get_allocator() { return _pool.get_allocator(); }
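The Bucket above now delays construction of Element through ManualConstructor instead of placement new into std::aligned_storage. A standalone sketch of the same pattern, with a hypothetical LazySlot type and std::string payload (assumed names, not from this commit):

    #include <string>
    #include "butil/memory/manual_constructor.h"

    struct LazySlot {
        LazySlot() : constructed(false) {}
        ~LazySlot() { if (constructed) { value.Destroy(); } }

        void set(const std::string& s) {
            if (constructed) {
                value.Destroy();              // tear down the previous object
            }
            value.Init(s);                    // construct in the aligned storage
            constructed = true;
        }
        const std::string& get() const { return *value; }

        bool constructed;
        butil::ManualConstructor<std::string> value;
    };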
diff --git a/src/butil/containers/mpsc_queue.h b/src/butil/containers/mpsc_queue.h
index 6db50f28..6ba09db3 100644
--- a/src/butil/containers/mpsc_queue.h
+++ b/src/butil/containers/mpsc_queue.h
@@ -24,6 +24,7 @@
#include "butil/object_pool.h"
#include "butil/type_traits.h"
+#include "butil/memory/manual_constructor.h"
namespace butil {
@@ -32,8 +33,7 @@ struct BAIDU_CACHELINE_ALIGNMENT MPSCQueueNode {
static MPSCQueueNode* const UNCONNECTED;
MPSCQueueNode* next{NULL};
- char data_mem[sizeof(T)]{};
-
+ ManualConstructor<T> data_mem;
};
template <typename T>
@@ -95,7 +95,7 @@ template <typename T, typename Alloc>
void MPSCQueue<T, Alloc>::Enqueue(typename add_const_reference<T>::type data) {
auto node = (MPSCQueueNode<T>*)_alloc.Alloc();
node->next = MPSCQueueNode<T>::UNCONNECTED;
- new ((void*)&node->data_mem) T(data);
+ node->data_mem.Init(data);
EnqueueImpl(node);
}
@@ -103,7 +103,7 @@ template <typename T, typename Alloc>
void MPSCQueue<T, Alloc>::Enqueue(T&& data) {
auto node = (MPSCQueueNode<T>*)_alloc.Alloc();
node->next = MPSCQueueNode<T>::UNCONNECTED;
- new ((void*)&node->data_mem) T(std::forward<T>(data));
+ node->data_mem.Init(data);
EnqueueImpl(node);
}
@@ -136,7 +136,7 @@ bool MPSCQueue<T, Alloc>::DequeueImpl(T* data) {
}
if (data) {
- auto mem = (T* const)node->data_mem;
+ auto mem = (T* const)node->data_mem.get();
*data = std::move(*mem);
}
MPSCQueueNode<T>* old_node = node;
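The node now embeds ManualConstructor<T>, so Enqueue constructs the value in place and Dequeue moves it out. A minimal lifecycle sketch of that idea, illustrative only and not taken from this commit:

    #include <string>
    #include <utility>
    #include "butil/memory/manual_constructor.h"

    int main() {
        butil::ManualConstructor<std::string> slot;   // plays the role of data_mem
        slot.Init("hello");                           // Enqueue: construct in place
        std::string out = std::move(*slot.get());     // Dequeue: move the value out
        slot.Destroy();                               // destroy the moved-from object
        return out == "hello" ? 0 : 1;
    }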
diff --git a/src/butil/memory/aligned_memory.h b/src/butil/memory/aligned_memory.h
index f589ff9c..c6aaaeef 100644
--- a/src/butil/memory/aligned_memory.h
+++ b/src/butil/memory/aligned_memory.h
@@ -51,24 +51,36 @@ namespace butil {
template <size_t Size, size_t ByteAlignment>
struct AlignedMemory {};
-#define BUTIL_DECL_ALIGNED_MEMORY(byte_alignment) \
- template <size_t Size> \
- class AlignedMemory<Size, byte_alignment> { \
- public: \
- ALIGNAS(byte_alignment) uint8_t data_[Size]; \
- void* void_data() { return static_cast<void*>(data_); } \
- const void* void_data() const { \
- return static_cast<const void*>(data_); \
- } \
- template<typename Type> \
+// std::aligned_storage has been deprecated in C++23,
+// because aligned_* are harmful to codebases and should not be used.
+// For details, see https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1413r3.pdf
+#if (__cplusplus >= 201103L)
+// In most places, use the C++11 keyword "alignas", which is preferred.
+#define DECL_ALIGNED_BUFFER(buffer_name, byte_alignment, size) \
+ alignas(byte_alignment) uint8_t buffer_name[size]
+#else
+#define DECL_ALIGNED_BUFFER(buffer_name, byte_alignment, size) \
+ ALIGNAS(byte_alignment) uint8_t buffer_name[size]
+#endif
+
+#define BUTIL_DECL_ALIGNED_MEMORY(byte_alignment) \
+ template <size_t Size> \
+ class AlignedMemory<Size, byte_alignment> { \
+ public: \
+ DECL_ALIGNED_BUFFER(data_, byte_alignment, Size); \
+ void* void_data() { return static_cast<void*>(data_); } \
+ const void* void_data() const { \
+ return static_cast<const void*>(data_); \
+ } \
+ template<typename Type> \
Type* data_as() { return static_cast<Type*>(void_data()); } \
- template<typename Type> \
- const Type* data_as() const { \
- return static_cast<const Type*>(void_data()); \
- } \
- private: \
- void* operator new(size_t); \
- void operator delete(void*); \
+ template<typename Type> \
+ const Type* data_as() const { \
+ return static_cast<const Type*>(void_data()); \
+ } \
+ private: \
+ void* operator new(size_t); \
+ void operator delete(void*); \
}
// Specialization for all alignments is required because MSVC (as of VS 2008)
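A hedged usage sketch of AlignedMemory as raw, correctly aligned storage for later placement construction; Vec4 is a hypothetical payload type, not from this commit:

    #include <cstdint>
    #include <new>
    #include "butil/memory/aligned_memory.h"

    struct Vec4 { float x, y, z, w; };

    int main() {
        butil::AlignedMemory<sizeof(Vec4), 16> storage;        // 16-byte aligned bytes
        Vec4* v = new (storage.void_data()) Vec4{1, 2, 3, 4};  // placement-construct
        float sum = v->x + v->w;                                // use the object
        v->~Vec4();                                             // manual destruction
        return sum == 5.0f ? 0 : 1;
    }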
diff --git a/src/butil/memory/manual_constructor.h b/src/butil/memory/manual_constructor.h
index 5d06b949..266f6cca 100644
--- a/src/butil/memory/manual_constructor.h
+++ b/src/butil/memory/manual_constructor.h
@@ -56,6 +56,11 @@ class ManualConstructor {
inline Type& operator*() { return *get(); }
inline const Type& operator*() const { return *get(); }
+ template<typename Ctor>
+ inline void InitBy(Ctor ctor) {
+ ctor(space_.void_data());
+ }
+
// You can pass up to eight constructor arguments as arguments of Init().
inline void Init() {
new(space_.void_data()) Type;
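The new InitBy(Ctor) hands the raw aligned storage to a caller-supplied functor, which lets the caller perform the placement new itself and record the resulting pointer, as the pools below do. A short sketch with a hypothetical Point type:

    #include <new>
    #include "butil/memory/manual_constructor.h"

    struct Point {
        int x, y;
        Point(int a, int b) : x(a), y(b) {}
    };

    int main() {
        butil::ManualConstructor<Point> slot;
        Point* created = NULL;
        slot.InitBy([&](void* mem) {                  // functor receives the raw storage
            created = new (mem) Point(3, 4);          // and records the new object
        });
        int sum = created->x + created->y;            // 7
        slot.Destroy();
        return sum == 7 ? 0 : 1;
    }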
diff --git a/src/butil/object_pool_inl.h b/src/butil/object_pool_inl.h
index 6f10e03a..71b9b890 100644
--- a/src/butil/object_pool_inl.h
+++ b/src/butil/object_pool_inl.h
@@ -29,6 +29,7 @@
#include "butil/macros.h" // BAIDU_CACHELINE_ALIGNMENT
#include "butil/scoped_lock.h" // BAIDU_SCOPED_LOCK
#include "butil/thread_local.h" // BAIDU_THREAD_LOCAL
+#include "butil/memory/manual_constructor.h"
#include <vector>
#ifdef BUTIL_OBJECT_POOL_NEED_FREE_ITEM_NUM
@@ -97,7 +98,7 @@ public:
// items in the Block are only used by the thread.
// To support cache-aligned objects, align Block.items by cacheline.
struct BAIDU_CACHELINE_ALIGNMENT Block {
- char items[sizeof(T) * BLOCK_NITEM];
+ ManualConstructor<T> items[BLOCK_NITEM];
size_t nitem;
Block() : nitem(0) {}
@@ -145,7 +146,7 @@ public:
// which may include parenthesis because when T is POD, "new T()"
// and "new T" are different: former one sets all fields to 0 which
// we don't want.
-#define BAIDU_OBJECT_POOL_GET(CTOR_ARGS) \
+#define BAIDU_OBJECT_POOL_GET(...) \
/* Fetch local free ptr */ \
if (_cur_free.nfree) { \
BAIDU_OBJECT_POOL_FREE_ITEM_NUM_SUB1; \
@@ -158,9 +159,13 @@ public:
BAIDU_OBJECT_POOL_FREE_ITEM_NUM_SUB1; \
return _cur_free.ptrs[--_cur_free.nfree]; \
} \
+ T* obj = NULL; \
+ auto ctor = [&](void* mem) { \
+ obj = new (mem) T(__VA_ARGS__); \
+ }; \
/* Fetch memory from local block */ \
if (_cur_block && _cur_block->nitem < BLOCK_NITEM) { \
-        T* obj = new ((T*)_cur_block->items + _cur_block->nitem) T CTOR_ARGS; \
+ (_cur_block->items + _cur_block->nitem)->InitBy(ctor); \
if (!ObjectPoolValidator<T>::validate(obj)) { \
obj->~T(); \
return NULL; \
@@ -171,7 +176,7 @@ public:
/* Fetch a Block from global */ \
_cur_block = add_block(&_cur_block_index); \
if (_cur_block != NULL) { \
-        T* obj = new ((T*)_cur_block->items + _cur_block->nitem) T CTOR_ARGS; \
+ (_cur_block->items + _cur_block->nitem)->InitBy(ctor); \
if (!ObjectPoolValidator<T>::validate(obj)) { \
obj->~T(); \
return NULL; \
@@ -188,12 +193,12 @@ public:
template <typename A1>
inline T* get(const A1& a1) {
- BAIDU_OBJECT_POOL_GET((a1));
+ BAIDU_OBJECT_POOL_GET(a1);
}
template <typename A1, typename A2>
inline T* get(const A1& a1, const A2& a2) {
- BAIDU_OBJECT_POOL_GET((a1, a2));
+ BAIDU_OBJECT_POOL_GET(a1, a2);
}
#undef BAIDU_OBJECT_POOL_GET
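The variadic BAIDU_OBJECT_POOL_GET above wraps construction in a lambda passed to InitBy so the constructed pointer can be validated afterwards. A standalone sketch of that construct-through-lambda shape (hypothetical Conn type and construct_in helper, illustrative only):

    #include <new>
    #include <utility>
    #include "butil/memory/manual_constructor.h"

    struct Conn {
        int fd;
        explicit Conn(int f) : fd(f) {}
    };

    // Same shape as the macro body: construct T inside the slot via a lambda so
    // the resulting pointer is captured for validation afterwards.
    template <typename T, typename... Args>
    T* construct_in(butil::ManualConstructor<T>& slot, Args&&... args) {
        T* obj = NULL;
        auto ctor = [&](void* mem) {
            obj = new (mem) T(std::forward<Args>(args)...);
        };
        slot.InitBy(ctor);
        return obj;
    }

    int main() {
        butil::ManualConstructor<Conn> slot;
        Conn* c = construct_in(slot, 42);
        int fd = c->fd;
        slot.Destroy();
        return fd == 42 ? 0 : 1;
    }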
diff --git a/src/butil/resource_pool_inl.h b/src/butil/resource_pool_inl.h
index 4ebe4ad1..2ca858ad 100644
--- a/src/butil/resource_pool_inl.h
+++ b/src/butil/resource_pool_inl.h
@@ -29,6 +29,7 @@
#include "butil/macros.h" // BAIDU_CACHELINE_ALIGNMENT
#include "butil/scoped_lock.h" // BAIDU_SCOPED_LOCK
#include "butil/thread_local.h" // thread_atexit
+#include "butil/memory/manual_constructor.h"
#include <vector>
#ifdef BUTIL_RESOURCE_POOL_NEED_FREE_ITEM_NUM
@@ -113,7 +114,7 @@ public:
// items in the Block are only used by the thread.
// To support cache-aligned objects, align Block.items by cacheline.
struct BAIDU_CACHELINE_ALIGNMENT Block {
- char items[sizeof(T) * BLOCK_NITEM];
+ ManualConstructor<T> items[BLOCK_NITEM];
size_t nitem;
Block() : nitem(0) {}
@@ -162,48 +163,52 @@ public:
// which may include parenthesis because when T is POD, "new T()"
// and "new T" are different: former one sets all fields to 0 which
// we don't want.
-#define BAIDU_RESOURCE_POOL_GET(CTOR_ARGS) \
- /* Fetch local free id */ \
- if (_cur_free.nfree) { \
+#define BAIDU_RESOURCE_POOL_GET(...) \
+ /* Fetch local free id */ \
+ if (_cur_free.nfree) { \
const ResourceId<T> free_id = _cur_free.ids[--_cur_free.nfree]; \
- *id = free_id; \
- BAIDU_RESOURCE_POOL_FREE_ITEM_NUM_SUB1; \
- return unsafe_address_resource(free_id); \
- } \
- /* Fetch a FreeChunk from global. \
- TODO: Popping from _free needs to copy a FreeChunk which is \
- costly, but hardly impacts amortized performance. */ \
- if (_pool->pop_free_chunk(_cur_free)) { \
- --_cur_free.nfree; \
- const ResourceId<T> free_id = _cur_free.ids[_cur_free.nfree]; \
- *id = free_id; \
- BAIDU_RESOURCE_POOL_FREE_ITEM_NUM_SUB1; \
- return unsafe_address_resource(free_id); \
- } \
- /* Fetch memory from local block */ \
- if (_cur_block && _cur_block->nitem < BLOCK_NITEM) { \
+ *id = free_id; \
+ BAIDU_RESOURCE_POOL_FREE_ITEM_NUM_SUB1; \
+ return unsafe_address_resource(free_id); \
+ } \
+ /* Fetch a FreeChunk from global. \
+ TODO: Popping from _free needs to copy a FreeChunk which is \
+ costly, but hardly impacts amortized performance. */ \
+ if (_pool->pop_free_chunk(_cur_free)) { \
+ --_cur_free.nfree; \
+ const ResourceId<T> free_id = _cur_free.ids[_cur_free.nfree]; \
+ *id = free_id; \
+ BAIDU_RESOURCE_POOL_FREE_ITEM_NUM_SUB1; \
+ return unsafe_address_resource(free_id); \
+ } \
+ T* p = NULL; \
+ auto ctor = [&](void* mem) { \
+ p = new (mem) T(__VA_ARGS__); \
+ }; \
+ /* Fetch memory from local block */ \
+ if (_cur_block && _cur_block->nitem < BLOCK_NITEM) { \
id->value = _cur_block_index * BLOCK_NITEM + _cur_block->nitem; \
-        T* p = new ((T*)_cur_block->items + _cur_block->nitem) T CTOR_ARGS;   \
- if (!ResourcePoolValidator<T>::validate(p)) { \
- p->~T(); \
- return NULL; \
- } \
- ++_cur_block->nitem; \
- return p; \
- } \
- /* Fetch a Block from global */ \
- _cur_block = add_block(&_cur_block_index); \
- if (_cur_block != NULL) { \
+ (_cur_block->items + _cur_block->nitem)->InitBy(ctor); \
+ if (!ResourcePoolValidator<T>::validate(p)) { \
+ p->~T(); \
+ return NULL; \
+ } \
+ ++_cur_block->nitem; \
+ return p; \
+ } \
+ /* Fetch a Block from global */ \
+ _cur_block = add_block(&_cur_block_index); \
+ if (_cur_block != NULL) { \
id->value = _cur_block_index * BLOCK_NITEM + _cur_block->nitem; \
-        T* p = new ((T*)_cur_block->items + _cur_block->nitem) T CTOR_ARGS;   \
- if (!ResourcePoolValidator<T>::validate(p)) { \
- p->~T(); \
- return NULL; \
- } \
- ++_cur_block->nitem; \
- return p; \
- } \
- return NULL; \
+ (_cur_block->items + _cur_block->nitem)->InitBy(ctor); \
+ if (!ResourcePoolValidator<T>::validate(p)) { \
+ p->~T(); \
+ return NULL; \
+ } \
+ ++_cur_block->nitem; \
+ return p; \
+ } \
+ return NULL; \
inline T* get(ResourceId<T>* id) {
@@ -212,12 +217,12 @@ public:
template <typename A1>
inline T* get(ResourceId<T>* id, const A1& a1) {
- BAIDU_RESOURCE_POOL_GET((a1));
+ BAIDU_RESOURCE_POOL_GET(a1);
}
template <typename A1, typename A2>
inline T* get(ResourceId<T>* id, const A1& a1, const A2& a2) {
- BAIDU_RESOURCE_POOL_GET((a1, a2));
+ BAIDU_RESOURCE_POOL_GET(a1, a2);
}
#undef BAIDU_RESOURCE_POOL_GET
@@ -384,7 +389,7 @@ private:
// Create a Block and append it to right-most BlockGroup.
static Block* add_block(size_t* index) {
- Block* const new_block = new(std::nothrow) Block;
+ Block* const new_block = new (std::nothrow) Block;
if (NULL == new_block) {
return NULL;
}
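For reference, a small sketch of the id arithmetic used in the macro above (`id->value = _cur_block_index * BLOCK_NITEM + _cur_block->nitem`): a ResourceId encodes block index and slot, and division/modulo recover them. The BLOCK_NITEM value below is assumed for illustration only:

    #include <cassert>
    #include <cstddef>

    int main() {
        const size_t BLOCK_NITEM = 256;                      // assumed block capacity
        const size_t block_index = 3, slot = 17;
        const size_t id = block_index * BLOCK_NITEM + slot;  // 785, as in the macro
        assert(id / BLOCK_NITEM == block_index);             // recover the block
        assert(id % BLOCK_NITEM == slot);                    // recover the slot within it
        return 0;
    }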
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]