This is an automated email from the ASF dual-hosted git repository.

amc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
     new ad7a339  MemArena: overhaul internals and clean up API.
ad7a339 is described below

commit ad7a3398416ef1406250df50c8a0c49870378d9e
Author: Alan M. Carroll <a...@apache.org>
AuthorDate: Fri May 4 07:55:31 2018 -0500

    MemArena: overhaul internals and clean up API.
    
    This adjusts the internals to be simpler and more in line with the original design.
    In particular, there are at most two generations, and two only during a freeze.
    Added the ability to defer internal allocation until the first external request,
    and the ability to set a minimum size for the next internal allocation block.
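
    As a rough illustration of the lifecycle this describes (hypothetical
    caller code, not part of the commit):

        ts::MemArena arena;                // default constructed - no internal block yet.
        arena.reserve(1 << 16);            // next internal block will be at least 64KB.
        ts::MemSpan s1 = arena.alloc(100); // first alloc creates the internal block.
        arena.freeze();                    // current blocks become the frozen generation.
        ts::MemSpan s2 = arena.alloc(200); // allocated in the new generation.
        arena.thaw();                      // frozen memory released: s1 is gone, s2 remains valid.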
---
 CMakeLists.txt                      |   4 +
 cmd/traffic_cache_tool/CacheDefs.cc |   2 +-
 lib/ts/BufferWriterFormat.cc        |   4 +-
 lib/ts/MemArena.cc                  | 172 ++++++++++++--------------
 lib/ts/MemArena.h                   | 192 +++++++++++++++++++++--------
 lib/ts/MemSpan.h                    |  86 ++++++++++---
 lib/ts/unit-tests/test_MemArena.cc  | 233 +++++++++++++++++-------------------
 7 files changed, 407 insertions(+), 286 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 537fe03..523b643 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1269,6 +1269,8 @@ add_library(libtsutil SHARED
         lib/ts/Map.h
         lib/ts/MatcherUtils.cc
         lib/ts/MatcherUtils.h
+        lib/ts/MemArena.h
+        lib/ts/MemArena.cc
         lib/ts/MemSpan.h
         lib/ts/mkdfa.c
         lib/ts/MMH.cc
@@ -1332,6 +1334,8 @@ add_executable(test_tslib
        lib/ts/unit-tests/test_ink_inet.cc
        lib/ts/unit-tests/test_IpMap.cc
        lib/ts/unit-tests/test_layout.cc
+       lib/ts/unit-tests/test_MemArena.cc
+       lib/ts/unit-tests/test_MemSpan.cc
        lib/ts/unit-tests/test_MT_hashtable.cc
        lib/ts/unit-tests/test_Scalar.cc
        lib/ts/unit-tests/test_string_view.cc
diff --git a/cmd/traffic_cache_tool/CacheDefs.cc b/cmd/traffic_cache_tool/CacheDefs.cc
index 5e267cb..32a3c20 100644
--- a/cmd/traffic_cache_tool/CacheDefs.cc
+++ b/cmd/traffic_cache_tool/CacheDefs.cc
@@ -270,7 +270,7 @@ Stripe::validateMeta(StripeMeta const *meta)
 bool
 Stripe::probeMeta(MemSpan &mem, StripeMeta const *base_meta)
 {
-  while (mem.usize() >= sizeof(StripeMeta)) {
+  while (mem.size() >= sizeof(StripeMeta)) {
     StripeMeta const *meta = mem.ptr<StripeMeta>(0);
    if (this->validateMeta(meta) && (base_meta == nullptr ||               // no base version to check against.
                                     (meta->version == base_meta->version) // need more checks here I think.
diff --git a/lib/ts/BufferWriterFormat.cc b/lib/ts/BufferWriterFormat.cc
index 8adb5ff..463f98b 100644
--- a/lib/ts/BufferWriterFormat.cc
+++ b/lib/ts/BufferWriterFormat.cc
@@ -591,14 +591,14 @@ bwformat(BufferWriter &w, BWFSpec const &spec, string_view sv)
 BufferWriter &
 bwformat(BufferWriter &w, BWFSpec const &spec, MemSpan const &span)
 {
-  static const BWFormat default_fmt{"{:#x}@{}"};
+  static const BWFormat default_fmt{"{:#x}@{:p}"};
   if (spec._ext.size() && 'd' == spec._ext.front()) {
    const char *digits = 'X' == spec._type ? bw_fmt::UPPER_DIGITS : bw_fmt::LOWER_DIGITS;
     if (spec._radix_lead_p) {
       w.write('0');
       w.write(digits[33]);
     }
-    bw_fmt::Hex_Dump(w, string_view{static_cast<char *>(span.data()), span.usize()}, digits);
+    bw_fmt::Hex_Dump(w, span.view(), digits);
   } else {
     w.print(default_fmt, span.size(), span.data());
   }
diff --git a/lib/ts/MemArena.cc b/lib/ts/MemArena.cc
index d4af156..22e18cb 100644
--- a/lib/ts/MemArena.cc
+++ b/lib/ts/MemArena.cc
@@ -28,14 +28,6 @@
 
 using namespace ts;
 
-inline MemArena::Block::Block(size_t n) : size(n), allocated(0), next(nullptr) {}
-
-inline char *
-MemArena::Block::data()
-{
-  return reinterpret_cast<char *>(this + 1);
-}
-
 /**
    Allocates a new internal block of memory. If there are no existing blocks, this becomes the head of the
     linked list. If there are existing allocations, the new block is inserted in the current list.
@@ -45,37 +37,38 @@ MemArena::Block::data()
 inline MemArena::Block *
 MemArena::newInternalBlock(size_t n, bool custom)
 {
-  // Adjust to the nearest power of two. Works for 64 bit values. Allocate Block header and
-  //  actual underlying memory together for locality. ALLOC_HEADER_SIZE to account for malloc/free headers.
-  static constexpr size_t free_space_per_page = DEFAULT_PAGE_SIZE - sizeof(Block) - ALLOC_HEADER_SIZE;
-
-  void *tmp;
-  if (n <= free_space_per_page) { // will fit within one page, just allocate.
-    tmp = ats_malloc(n + sizeof(Block));
+  // Allocate Block header and actual underlying memory together for locality and fewer calls.
+  // ALLOC_HEADER_SIZE to account for malloc/free headers to try to minimize pages required.
+  static constexpr size_t FREE_SPACE_PER_PAGE = DEFAULT_PAGE_SIZE - sizeof(Block) - ALLOC_HEADER_SIZE;
+  static_assert(ALLOC_MIN_SIZE > ALLOC_HEADER_SIZE,
+                "ALLOC_MIN_SIZE must be larger than ALLOC_HEADER_SIZE to ensure positive allocation request size.");
+
+  // If post-freeze or reserved, bump up block size then clear.
+  n               = std::max({n, next_block_size, ALLOC_MIN_SIZE});
+  next_block_size = 0;
+
+  if (n <= FREE_SPACE_PER_PAGE) { // will fit within one page, just allocate.
+    n += sizeof(Block);           // can just allocate that much with the Block.
   } else {
-    size_t t = n;
-    t--;
-    t |= t >> 1;
-    t |= t >> 2;
-    t |= t >> 4;
-    t |= t >> 8;
-    t |= t >> 16;
-    t |= t >> 32;
-    t++;
-    n   = t - sizeof(Block) - ALLOC_HEADER_SIZE; // n is the actual amount of memory the block can allocate out.
-    tmp = ats_malloc(t - ALLOC_HEADER_SIZE);
+    // Round up to next power of 2 and allocate that.
+    --n;
+    n |= n >> 1;
+    n |= n >> 2;
+    n |= n >> 4;
+    n |= n >> 8;
+    n |= n >> 16;
+    n |= n >> 32;
+    ++n;                    // power of 2 now.
+    n -= ALLOC_HEADER_SIZE; // clip presumed malloc header size.
   }
 
-  std::shared_ptr<Block> block(new (tmp) Block(n)); // placement new
+  // Allocate space for the Block instance and the requested memory.
+  std::shared_ptr<Block> block(new (ats_malloc(n)) Block(n - sizeof(Block)));
 
   if (current) {
-    arena_size += n;
-    generation_size += n;
-
     if (!custom) {
       block->next = current;
       current     = block;
-      return current.get();
     } else {
      // Situation where we do not have enough space for a large block of memory. We don't want
      //  to update @current because it would be wasting memory. Create a new block for the entire
@@ -84,23 +77,16 @@ MemArena::newInternalBlock(size_t n, bool custom)
       current->next = block;
     }
   } else { // empty
-    generation_size = n;
-    arena_size      = n;
-
-    generation = current = block;
+    current = block;
   }
 
   return block.get();
 }
 
-MemArena::MemArena()
-{
-  newInternalBlock(arena_size, true); // nDefault size
-}
-
 MemArena::MemArena(size_t n)
 {
-  newInternalBlock(n, true);
+  next_block_size = 0; // don't force larger size.
+  this->newInternalBlock(n, true);
 }
 
 /**
@@ -110,99 +96,91 @@ MemArena::MemArena(size_t n)
 MemSpan
 MemArena::alloc(size_t n)
 {
-  total_alloc += n;
-
-  // Two cases when we want a new internal block:
-  //   1. A new generation.
-  //   2. Current internal block isn't large enough to alloc
-  //       @n bytes.
-
   Block *block = nullptr;
 
-  if (!generation) { // allocation after a freeze. new generation.
-    generation_size = 0;
+  current_alloc += n;
 
-    next_block_size = (next_block_size < n) ? n : next_block_size;
-    block           = newInternalBlock(next_block_size, false);
-
-    // current is updated in newInternalBlock.
-    generation = current;
-  } else if (current->size - current->allocated /* remaining size */ < n) {
-    if (n >= DEFAULT_PAGE_SIZE && n >= (current->size / 2)) {
-      block = newInternalBlock(n, true);
+  if (!current) {
+    block = this->newInternalBlock(n, false);
+  } else {
+    if (current->size - current->allocated /* remaining size */ < n) {
+      if (n >= DEFAULT_PAGE_SIZE && n >= (current->size / 2)) {
+        block = this->newInternalBlock(n, true);
+      } else {
+        block = this->newInternalBlock(current->size * 2, false);
+      }
     } else {
-      block = newInternalBlock(current->size * 2, false);
+      block = current.get();
     }
-  } else {
-    // All good. Simply allocate.
-    block = current.get();
   }
 
   ink_assert(block->data() != nullptr);
   ink_assert(block->size >= n);
 
-  uint64_t offset = block->allocated;
+  auto zret = block->remnant().prefix(n);
   block->allocated += n;
 
-  // Allocate a span of memory within the block.
-  MemSpan ret(block->data() + offset, n);
-  return ret;
+  return zret;
 }
 
 MemArena &
 MemArena::freeze(size_t n)
 {
-  generation      = nullptr;
-  next_block_size = n ? n : total_alloc;
-  prev_alloc      = total_alloc;
+  prev            = current;
+  prev_alloc      = current_alloc;
+  current         = nullptr;
+  next_block_size = n ? n : current_alloc;
+  current_alloc   = 0;
 
   return *this;
 }
 
-/**
-    Everything up the current generation is considered frozen and will be
-     thawed away (deallocated).
- */
 MemArena &
 MemArena::thaw()
 {
-  // A call to thaw a frozen generation before any allocation. Empty the arena.
-  if (!generation) {
-    return empty();
-  }
-
-  arena_size = generation_size;
-  total_alloc -= prev_alloc;
   prev_alloc = 0;
-
-  generation->next = nullptr;
+  prev       = nullptr;
   return *this;
 }
 
-/**
-    Check if a pointer is in the arena. Need to search through all the internal blocks.
- */
 bool
-MemArena::contains(void *ptr) const
+MemArena::contains(const void *ptr) const
 {
-  Block *tmp = current.get();
-  while (tmp) {
-    if (ptr >= tmp->data() && ptr < tmp->data() + tmp->size) {
+  for (Block *b = current.get(); b; b = b->next.get()) {
+    if (b->contains(ptr)) {
+      return true;
+    }
+  }
+  for (Block *b = prev.get(); b; b = b->next.get()) {
+    if (b->contains(ptr)) {
       return true;
     }
-    tmp = tmp->next.get();
   }
+
   return false;
 }
 
 MemArena &
-MemArena::empty()
+MemArena::clear()
 {
-  generation = nullptr;
-  current    = nullptr;
-
-  arena_size = generation_size = 0;
-  total_alloc = prev_alloc = 0;
+  prev          = nullptr;
+  prev_alloc    = 0;
+  current       = nullptr;
+  current_alloc = 0;
 
   return *this;
-}
\ No newline at end of file
+}
+
+size_t
+MemArena::extent() const
+{
+  size_t zret{0};
+  Block *b;
+  for (b = current.get(); b; b = b->next.get()) {
+    zret += b->size;
+  }
+  for (b = prev.get(); b; b = b->next.get()) {
+    zret += b->size;
+  }
+  return zret;
+};
\ No newline at end of file
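
The shift-or cascade in newInternalBlock above is the standard round-up-to-the-next-power-of-2
bit trick for 64 bit values. A standalone sketch of the same computation
(illustrative name, not from the commit):

    #include <cstdint>

    inline uint64_t
    round_up_pow2(uint64_t n) // assumes n > 0, as ALLOC_MIN_SIZE guarantees here.
    {
      --n;          // so an exact power of 2 maps to itself.
      n |= n >> 1;  // smear the highest set bit into every lower position...
      n |= n >> 2;
      n |= n >> 4;
      n |= n >> 8;
      n |= n >> 16;
      n |= n >> 32; // ...covering all 64 bits.
      return n + 1; // e.g. 3000 -> 4096, 4096 -> 4096.
    }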
diff --git a/lib/ts/MemArena.h b/lib/ts/MemArena.h
index 683772b..b635689 100644
--- a/lib/ts/MemArena.h
+++ b/lib/ts/MemArena.h
@@ -42,22 +42,49 @@ namespace ts
  */
 class MemArena
 {
+  using self_type = MemArena; ///< Self reference type.
 public:
   /** Simple internal arena block of memory. Maintains the underlying memory.
    */
   struct Block {
-    size_t size;
-    size_t allocated;
-    std::shared_ptr<Block> next;
-
+    size_t size;                 ///< Actual block size.
+    size_t allocated{0};         ///< Current allocated (in use) bytes.
+    std::shared_ptr<Block> next; ///< Previously allocated block list.
+
+    /** Construct to have @a n bytes of available storage.
+     *
+     * Note this is descriptive - this presumes use via placement new and the size value describes
+     * memory already allocated immediately after this instance.
+     * @param n The amount of storage.
+     */
     Block(size_t n);
+    /// Get the start of the data in this block.
     char *data();
+    /// Get the start of the data in this block.
+    const char *data() const;
+    /// Amount of unallocated storage.
+    size_t remaining() const;
+    /// Span of unallocated storage.
+    MemSpan remnant();
+    /** Check if the byte at address @a ptr is in this block.
+     *
+     * @param ptr Address of byte to check.
+     * @return @c true if @a ptr is in this block, @c false otherwise.
+     */
+    bool contains(const void *ptr) const;
   };
 
+  /** Default constructor.
+   * Construct with no memory.
+   */
   MemArena();
+  /** Construct with @a n bytes of storage.
+   *
+   * @param n Number of bytes in the initial block.
+   */
   explicit MemArena(size_t n);
 
-  /** MemSpan alloc(size_t n)
+  /** Allocate @a n bytes of storage.
 
      Returns a span of memory within the arena. alloc() is self expanding but DOES NOT self coalesce. This means
      that no matter the arena size, the caller will always be able to alloc() @a n bytes.
@@ -67,19 +94,30 @@ public:
    */
   MemSpan alloc(size_t n);
 
-  /** MemArena& freeze(size_t n = 0)
+  /** Adjust future block allocation size.
+   * This does not cause allocation, but instead makes a note of the size @a n and when a new block
+   * is needed, it will be at least @a n bytes. This is most useful for default constructed instances
+   * where the initial allocation should be delayed until use.
+   * @param n Minimum size of next allocated block.
+   * @return @a this
+   */
+  self_type &reserve(size_t n);
+
+  /** Freeze memory allocation.
 
       Will "freeze" a generation of memory. Any memory previously allocated 
can still be used. This is an
       important distinction as freeze does not mean that the memory is 
immutable, only that subsequent allocations
       will be in a new generation.
 
+      If @a n == 0, the first block of next generation will be large enough to 
hold all existing allocations.
+      This enables coalescence for locality of reference.
+
       @param n Number of bytes for new generation.
-        if @a n == 0, the next generation will be large enough to hold all 
existing allocations.
       @return @c *this
    */
   MemArena &freeze(size_t n = 0);
 
-  /** MemArena& thaw()
+  /** Unfreeze memory allocation, discard previously frozen memory.
 
       Will "thaw" away any previously frozen generations. Any generation that 
is not the current generation is considered
       frozen because there is no way to allocate in any of those memory 
blocks. thaw() is the only mechanism for deallocating
@@ -90,43 +128,36 @@ public:
    */
   MemArena &thaw();
 
-  /** MemArena& empty
+  /** Release all memory.
 
      Empties the entire arena and deallocates all underlying memory. Next block size will be equal to the sum of all
       allocations before the call to empty.
    */
-  MemArena &empty();
+  MemArena &clear();
 
-  /// @returns the current generation @c size.
-  size_t
-  size() const
-  {
-    return arena_size;
-  }
+  /// @returns the memory allocated in the generation.
+  size_t size() const;
 
   /// @returns the @c remaining space within the generation.
-  size_t
-  remaining() const
-  {
-    return (current) ? current->size - current->allocated : 0;
-  }
+  size_t remaining() const;
+
+  /// @returns the remaining contiguous space in the active generation.
+  MemSpan remnant() const;
 
   /// @returns the total number of bytes allocated within the arena.
-  size_t
-  allocated_size() const
-  {
-    return total_alloc;
-  }
-
-  /// @returns the number of bytes that have not been allocated within the arena
-  size_t
-  unallocated_size() const
-  {
-    return size() - allocated_size();
-  }
-
-  /// @return a @c true if @ptr is in memory owned by this arena, @c false if not.
-  bool contains(void *ptr) const;
+  size_t allocated_size() const;
+
+  /** Check if the byte at @a ptr is in memory owned by this arena.
+   *
+   * @param ptr Address of byte to check.
+   * @return @c true if the byte at @a ptr is in the arena, @c false if not.
+   */
+  bool contains(const void *ptr) const;
+
+  /** Total memory footprint, including wasted space.
+   * @return Total memory footprint.
+   */
+  size_t extent() const;
 
 private:
   /// creates a new @Block of size @n and places it within the @allocations list.
@@ -135,19 +166,86 @@ private:
 
   static constexpr size_t DEFAULT_BLOCK_SIZE = 1 << 15; ///< 32kb
   static constexpr size_t DEFAULT_PAGE_SIZE  = 1 << 12; ///< 4kb
-  static constexpr size_t ALLOC_HEADER_SIZE  = 16;
+  static constexpr size_t ALLOC_HEADER_SIZE  = 16;      ///< Guess of overhead of @c malloc
+  /// Never allocate less than this.
+  static constexpr size_t ALLOC_MIN_SIZE = 2 * ALLOC_HEADER_SIZE;
 
-  /** generation_size and prev_alloc are used to help quickly figure out the arena
-        info (arena_size and total_alloc) after a thaw().
-   */
-  size_t arena_size      = DEFAULT_BLOCK_SIZE; ///< --all
-  size_t generation_size = 0;                  ///< Size of current generation -- all
-  size_t total_alloc     = 0;                  ///< Total number of bytes allocated in the arena -- allocated
-  size_t prev_alloc      = 0;                  ///< Total allocations before current generation -- allocated
+  size_t current_alloc = 0; ///< Total allocations in the active generation.
+  /// Total allocations in the previous generation. This is only non-zero while the arena is frozen.
+  size_t prev_alloc = 0;
 
-  size_t next_block_size = 0; ///< Next internal block size
+  size_t next_block_size = DEFAULT_BLOCK_SIZE; ///< Next internal block size
 
-  std::shared_ptr<Block> generation = nullptr; ///< Marks current generation
-  std::shared_ptr<Block> current    = nullptr; ///< Head of allocations list. 
Allocate from this.
+  std::shared_ptr<Block> prev    = nullptr; ///< Previous generation.
+  std::shared_ptr<Block> current = nullptr; ///< Head of allocations list. 
Allocate from this.
 };
+
+// Implementation
+
+inline MemArena::Block::Block(size_t n) : size(n) {}
+
+inline char *
+MemArena::Block::data()
+{
+  return reinterpret_cast<char *>(this + 1);
+}
+
+inline const char *
+MemArena::Block::data() const
+{
+  return reinterpret_cast<const char *>(this + 1);
+}
+
+inline bool
+MemArena::Block::contains(const void *ptr) const
+{
+  const char *base = this->data();
+  return base <= ptr && ptr < base + size;
+}
+
+inline size_t
+MemArena::Block::remaining() const
+{
+  return size - allocated;
+}
+
+inline MemArena::MemArena() {}
+
+inline MemSpan
+MemArena::Block::remnant()
+{
+  return {this->data() + allocated, static_cast<ptrdiff_t>(this->remaining())};
+}
+
+inline size_t
+MemArena::size() const
+{
+  return current_alloc;
+}
+
+inline size_t
+MemArena::allocated_size() const
+{
+  return prev_alloc + current_alloc;
+}
+
+inline MemArena &
+MemArena::reserve(size_t n)
+{
+  next_block_size = n;
+  return *this;
+}
+
+inline size_t
+MemArena::remaining() const
+{
+  return current ? current->remaining() : 0;
+}
+
+inline MemSpan
+MemArena::remnant() const
+{
+  return current ? current->remnant() : MemSpan{};
+}
+
 } // namespace ts
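
Block::data() can return reinterpret_cast<char *>(this + 1) because each Block
is placement-constructed at the front of a single malloc'd region and the
usable storage follows the header directly. A minimal sketch of this
header-plus-payload pattern (simplified names, plain malloc instead of
ats_malloc):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Header {
      size_t size; // bytes of payload that follow this header in memory.
      char *data() { return reinterpret_cast<char *>(this + 1); }
    };

    Header *
    make_block(size_t payload)
    {
      void *raw = std::malloc(sizeof(Header) + payload); // one allocation: header and payload adjacent.
      return new (raw) Header{payload};                  // placement-construct the header at the front.
    }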
diff --git a/lib/ts/MemSpan.h b/lib/ts/MemSpan.h
index 5c63802..a7b7765 100644
--- a/lib/ts/MemSpan.h
+++ b/lib/ts/MemSpan.h
@@ -29,6 +29,7 @@
 #include <iosfwd>
 #include <iostream>
 #include <cstddef>
+#include <ts/string_view.h>
 
 /// Apache Traffic Server commons.
 namespace ts
@@ -136,22 +137,26 @@ public:
   /// @name Accessors.
   //@{
   /// Pointer to the first byte in the span.
-  void *begin() const;
+  char *begin();
+  const char *begin() const;
 
   /// Pointer to first byte not in the span.
-  void *end() const;
+  char *end();
+  const char *end() const;
 
   /// Number of bytes in the span.
-  constexpr ptrdiff_t size() const;
-  /// Number of bytes in the span (unsigned).
-  constexpr size_t usize() const;
+  constexpr ptrdiff_t ssize() const;
+  size_t size() const;
 
-  /// Memory pointer.
-  /// @note This is equivalent to @c begin currently but it's probably good to have separation.
-  constexpr void *data() const;
+  /// Pointer to memory in the span.
+  void *data();
+
+  /// Pointer to memory in the span.
+  const void *data() const;
 
   /// Memory pointer, one past the last element of the span.
-  void *data_end() const;
+  void *data_end();
+  const void *data_end() const;
 
   /// @return the @a V value at index @a n.
   template <typename V> V at(ptrdiff_t n) const;
@@ -259,6 +264,18 @@ public:
    */
   self_type &remove_suffix(ptrdiff_t n);
 
+  /** Return a view of the memory.
+   *
+   * @return A @c string_view covering the span contents.
+   */
+  string_view view() const;
+
+  /** Support automatic conversion to string_view.
+   *
+   * @return A view of the memory in this span.
+   */
+  operator string_view() const;
+
   /// Internal utility for computing the difference of two void pointers.
   /// @return the byte (char) difference between the pointers, @a lhs - @a rhs
   static ptrdiff_t distance(void const *lhs, void const *rhs);
@@ -389,40 +406,64 @@ MemSpan::operator+=(ptrdiff_t n)
   return *this;
 }
 
-inline void *
+inline char *
+MemSpan::begin()
+{
+  return static_cast<char *>(_data);
+}
+
+inline const char *
 MemSpan::begin() const
 {
+  return static_cast<const char *>(_data);
+}
+
+inline void *
+MemSpan::data()
+{
   return _data;
 }
 
-inline constexpr void *
+inline const void *
 MemSpan::data() const
 {
   return _data;
 }
 
-inline void *
-MemSpan::end() const
+inline char *
+MemSpan::end()
 {
   return static_cast<char *>(_data) + _size;
 }
 
+inline const char *
+MemSpan::end() const
+{
+  return static_cast<const char *>(_data) + _size;
+}
+
 inline void *
+MemSpan::data_end()
+{
+  return static_cast<char *>(_data) + _size;
+}
+
+inline const void *
 MemSpan::data_end() const
 {
   return static_cast<char *>(_data) + _size;
 }
 
 inline constexpr ptrdiff_t
-MemSpan::size() const
+MemSpan::ssize() const
 {
   return _size;
 }
 
-inline constexpr size_t
-MemSpan::usize() const
+inline size_t
+MemSpan::size() const
 {
-  return _size;
+  return static_cast<size_t>(_size);
 }
 
 inline MemSpan &
@@ -554,6 +595,17 @@ MemSpan::find_if(F const &pred)
   return nullptr;
 }
 
+inline string_view
+MemSpan::view() const
+{
+  return {static_cast<const char *>(_data), static_cast<size_t>(_size)};
+}
+
+inline MemSpan::operator string_view() const
+{
+  return this->view();
+}
+
 } // namespace ts
 
 namespace std
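
With view() and the conversion operator, a MemSpan can now be handed directly
to anything that takes a ts::string_view. A brief usage sketch (assumes the
existing pointer-and-length MemSpan constructor):

    char buf[] = {'h', 'e', 'l', 'l', 'o'};
    ts::MemSpan span{buf, 5};
    ts::string_view sv = span; // implicit, via operator string_view.
    auto v = span.view();      // explicit equivalent - both cover exactly the 5 bytes.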
diff --git a/lib/ts/unit-tests/test_MemArena.cc b/lib/ts/unit-tests/test_MemArena.cc
index 98829f3..69bb2f1 100644
--- a/lib/ts/unit-tests/test_MemArena.cc
+++ b/lib/ts/unit-tests/test_MemArena.cc
@@ -27,163 +27,152 @@
 
 TEST_CASE("MemArena generic", "[libts][MemArena]")
 {
-  ts::MemArena *arena = new ts::MemArena(64);
-  REQUIRE(arena->size() == 64);
-  ts::MemSpan span1 = arena->alloc(32);
-  ts::MemSpan span2 = arena->alloc(32);
+  ts::MemArena arena{64};
+  REQUIRE(arena.size() == 0);
+  ts::MemSpan span1 = arena.alloc(32);
+  ts::MemSpan span2 = arena.alloc(32);
 
   REQUIRE(span1.size() == 32);
   REQUIRE(span2.size() == 32);
-  REQUIRE(span1 != span2);
+  REQUIRE(span1.data() != span2.data());
 
-  arena->freeze(); // second gen - 128b
+  arena.freeze();
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.allocated_size() == 64);
 
-  span1 = arena->alloc(64);
+  span1 = arena.alloc(64);
   REQUIRE(span1.size() == 64);
-  REQUIRE(arena->size() == 128);
+  REQUIRE(arena.size() == 64);
+  arena.thaw();
+  REQUIRE(arena.size() == 64);
+  REQUIRE(arena.allocated_size() == 64);
 
-  arena->freeze(); // third gen - 256 b
-  span1 = arena->alloc(128);
+  arena.freeze();
+  span1 = arena.alloc(128);
   REQUIRE(span1.size() == 128);
-  REQUIRE(arena->size() == 256);
-  REQUIRE(arena->allocated_size() == 256);
-  REQUIRE(arena->remaining() == 0);
-  REQUIRE(arena->unallocated_size() == 0);
-
-  arena->thaw();
-  REQUIRE(arena->size() == 128);
-  REQUIRE(span1.size() == 128);
-  REQUIRE(arena->contains((char *)span1.data()));
-  REQUIRE(arena->remaining() == 0);
-
-  // scale down
-  arena->freeze(); // fourth gen - 128 b
-  arena->thaw();
-  REQUIRE(arena->size() == 0);
-  REQUIRE(arena->remaining() == 0);
-
-  arena->alloc(120);
-  REQUIRE(arena->size() == 128);
-  REQUIRE(arena->remaining() == 8);
-
-  delete arena;
+  REQUIRE(arena.size() == 128);
+  REQUIRE(arena.allocated_size() == 192);
+  REQUIRE(arena.remaining() == 0);
+  REQUIRE(arena.contains((char *)span1.data()));
+
+  arena.thaw();
+  REQUIRE(arena.size() == 128);
+  REQUIRE(arena.remaining() == 0);
 }
 
 TEST_CASE("MemArena freeze and thaw", "[libts][MemArena]")
 {
-  ts::MemArena *arena = new ts::MemArena(64);
-  arena->freeze();
-  REQUIRE(arena->size() == 64);
-  arena->alloc(64);
-  REQUIRE(arena->size() == 128);
-  arena->thaw();
-  REQUIRE(arena->size() == 64);
-  arena->freeze();
-  arena->thaw();
-  REQUIRE(arena->size() == 0);
-  REQUIRE(arena->remaining() == 0);
-
-  arena->alloc(1024);
-  REQUIRE(arena->size() == 1024);
-  arena->freeze();
-  REQUIRE(arena->size() == 1024);
-  arena->thaw();
-  REQUIRE(arena->size() == 0);
-
-  arena->freeze(64); // scale down
-  arena->alloc(64);
-  REQUIRE(arena->size() == 64);
-  REQUIRE(arena->remaining() == 0);
-
-  arena->empty();
-  REQUIRE(arena->size() == 0);
-  REQUIRE(arena->remaining() == 0);
-  REQUIRE(arena->allocated_size() == 0);
-  REQUIRE(arena->unallocated_size() == 0);
+  ts::MemArena arena{64};
+  arena.freeze();
+  REQUIRE(arena.size() == 0);
+  arena.alloc(64);
+  REQUIRE(arena.size() == 64);
+  arena.thaw();
+  REQUIRE(arena.size() == 64);
+  arena.freeze();
+  arena.thaw();
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.remaining() == 0);
+
+  arena.alloc(1024);
+  REQUIRE(arena.size() == 1024);
+  arena.freeze();
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.allocated_size() == 1024);
+  REQUIRE(arena.extent() >= 1024);
+  arena.thaw();
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.extent() == 0);
+
+  arena.freeze(64); // scale down
+  arena.alloc(64);
+  REQUIRE(arena.size() == 64);
+  REQUIRE(arena.remaining() == 0);
+
+  arena.clear();
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.remaining() == 0);
+  REQUIRE(arena.allocated_size() == 0);
 }
 
 TEST_CASE("MemArena helper", "[libts][MemArena]")
 {
-  ts::MemArena *arena = new ts::MemArena(256);
-  REQUIRE(arena->size() == 256);
-  REQUIRE(arena->remaining() == 256);
-  ts::MemSpan s = arena->alloc(56);
-  REQUIRE(arena->size() == 256);
-  REQUIRE(arena->remaining() == 200);
+  ts::MemArena arena{256};
+  REQUIRE(arena.size() == 0);
+  REQUIRE(arena.remaining() == 256);
+  ts::MemSpan s = arena.alloc(56);
+  REQUIRE(arena.size() == 56);
+  REQUIRE(arena.remaining() == 200);
   void *ptr = s.begin();
 
-  REQUIRE(arena->contains((char *)ptr));
-  REQUIRE(arena->contains((char *)ptr + 100)); // even though span isn't this large, this pointer should still be in arena
-  REQUIRE(!arena->contains((char *)ptr + 300));
-  REQUIRE(!arena->contains((char *)ptr - 1));
-  REQUIRE(arena->contains((char *)ptr + 255));
-  REQUIRE(!arena->contains((char *)ptr + 256));
-
-  arena->freeze(128);
-  REQUIRE(arena->contains((char *)ptr));
-  REQUIRE(arena->contains((char *)ptr + 100));
-  ts::MemSpan s2 = arena->alloc(10);
+  REQUIRE(arena.contains((char *)ptr));
+  REQUIRE(arena.contains((char *)ptr + 100)); // even though span isn't this large, this pointer should still be in arena
+  REQUIRE(!arena.contains((char *)ptr + 300));
+  REQUIRE(!arena.contains((char *)ptr - 1));
+  REQUIRE(arena.contains((char *)ptr + 255));
+  REQUIRE(!arena.contains((char *)ptr + 256));
+
+  arena.freeze(128);
+  REQUIRE(arena.contains((char *)ptr));
+  REQUIRE(arena.contains((char *)ptr + 100));
+  ts::MemSpan s2 = arena.alloc(10);
   void *ptr2     = s2.begin();
-  REQUIRE(arena->contains((char *)ptr));
-  REQUIRE(arena->contains((char *)ptr2));
-  REQUIRE(arena->unallocated_size() == 384 - 66);
-  REQUIRE(arena->allocated_size() == 56 + 10);
+  REQUIRE(arena.contains((char *)ptr));
+  REQUIRE(arena.contains((char *)ptr2));
+  REQUIRE(arena.allocated_size() == 56 + 10);
 
-  arena->thaw();
-  REQUIRE(!arena->contains((char *)ptr));
-  REQUIRE(arena->contains((char *)ptr2));
+  arena.thaw();
+  REQUIRE(!arena.contains((char *)ptr));
+  REQUIRE(arena.contains((char *)ptr2));
 
-  REQUIRE(arena->remaining() == 128 - 10);
-  REQUIRE(arena->allocated_size() == 10);
+  REQUIRE(arena.remaining() == 128 - 10);
+  REQUIRE(arena.allocated_size() == 10);
 }
 
 TEST_CASE("MemArena large alloc", "[libts][MemArena]")
 {
-  ts::MemArena *arena = new ts::MemArena(); // 32k
+  ts::MemArena arena;
 
-  size_t arena_size = arena->size(); // little bit less than 1 << 15
+  size_t arena_size = arena.size(); // little bit less than 1 << 15
 
-  ts::MemSpan s = arena->alloc(4000);
+  ts::MemSpan s = arena.alloc(4000);
   REQUIRE(s.size() == 4000);
 
   ts::MemSpan s_a[10];
-  s_a[0] = arena->alloc(100);
-  s_a[1] = arena->alloc(200);
-  s_a[2] = arena->alloc(300);
-  s_a[3] = arena->alloc(400);
-  s_a[4] = arena->alloc(500);
-  s_a[5] = arena->alloc(600);
-  s_a[6] = arena->alloc(700);
-  s_a[7] = arena->alloc(800);
-  s_a[8] = arena->alloc(900);
-  s_a[9] = arena->alloc(1000);
-
-  REQUIRE(arena->size() == arena_size); // didnt resize
+  s_a[0] = arena.alloc(100);
+  s_a[1] = arena.alloc(200);
+  s_a[2] = arena.alloc(300);
+  s_a[3] = arena.alloc(400);
+  s_a[4] = arena.alloc(500);
+  s_a[5] = arena.alloc(600);
+  s_a[6] = arena.alloc(700);
+  s_a[7] = arena.alloc(800);
+  s_a[8] = arena.alloc(900);
+  s_a[9] = arena.alloc(1000);
 
   // ensure none of the spans have any overlap in memory.
   for (int i = 0; i < 10; ++i) {
     s = s_a[i];
     for (int j = i + 1; j < 10; ++j) {
-      REQUIRE(s_a[i] != s_a[j]);
+      REQUIRE(s_a[i].data() != s_a[j].data());
     }
   }
 }
 
 TEST_CASE("MemArena block allocation", "[libts][MemArena]")
 {
-  ts::MemArena *arena = new ts::MemArena(64);
-  ts::MemSpan s       = arena->alloc(32);
-  ts::MemSpan s2      = arena->alloc(16);
-  ts::MemSpan s3      = arena->alloc(16);
+  ts::MemArena arena{64};
+  ts::MemSpan s  = arena.alloc(32);
+  ts::MemSpan s2 = arena.alloc(16);
+  ts::MemSpan s3 = arena.alloc(16);
 
   REQUIRE(s.size() == 32);
-  REQUIRE(arena->remaining() == 0);
-  REQUIRE(arena->unallocated_size() == 0);
-  REQUIRE(arena->allocated_size() == 64);
+  REQUIRE(arena.remaining() == 0);
+  REQUIRE(arena.allocated_size() == 64);
 
-  REQUIRE(arena->contains((char *)s.begin()));
-  REQUIRE(arena->contains((char *)s2.begin()));
-  REQUIRE(arena->contains((char *)s3.begin()));
+  REQUIRE(arena.contains((char *)s.begin()));
+  REQUIRE(arena.contains((char *)s2.begin()));
+  REQUIRE(arena.contains((char *)s3.begin()));
 
   REQUIRE((char *)s.begin() + 32 == (char *)s2.begin());
   REQUIRE((char *)s.begin() + 48 == (char *)s3.begin());
@@ -197,14 +186,14 @@ TEST_CASE("MemArena block allocation", "[libts][MemArena]")
 TEST_CASE("MemArena full blocks", "[libts][MemArena]")
 {
   // couple of large allocations - should be exactly sized in the generation.
-  ts::MemArena *arena = new ts::MemArena();
-  size_t init_size    = arena->size();
+  ts::MemArena arena;
+  size_t init_size = 32000;
 
-  arena->alloc(init_size - 64);
-  arena->alloc(32000); // should in its own box - exactly sized.
-  arena->alloc(64000);
+  arena.reserve(init_size);
+  arena.alloc(init_size - 64);
+  arena.alloc(32000); // should be in its own block - exactly sized.
+  arena.alloc(64000); // same here.
 
-  REQUIRE(arena->size() >= 32000 + 64000 + init_size); // may give a bit more but shouldnt be less
-  REQUIRE(arena->allocated_size() == 32000 + 64000 + init_size - 64);
-  REQUIRE(arena->remaining() >= 64);
-}
\ No newline at end of file
+  REQUIRE(arena.remaining() >= 64);
+  REQUIRE(arena.extent() > 32000 + 64000 + init_size);
+}
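
A further test in the same Catch style could exercise the deferred allocation
this commit adds; an illustrative sketch, not part of the commit:

    TEST_CASE("MemArena deferred allocation", "[libts][MemArena]")
    {
      ts::MemArena arena;              // default constructed - no internal memory yet.
      REQUIRE(arena.extent() == 0);
      arena.reserve(1024);             // note the size; still no allocation.
      REQUIRE(arena.extent() == 0);
      ts::MemSpan s = arena.alloc(10); // first alloc creates a block of at least 1024 bytes.
      REQUIRE(s.size() == 10);
      REQUIRE(arena.extent() >= 1024);
    }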

-- 
To stop receiving notification emails like this one, please contact
a...@apache.org.
