This is an automated email from the ASF dual-hosted git repository.
bneradt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new c186de2784 XPACK: update_maximum_size for storage (#11396)
c186de2784 is described below
commit c186de2784dd86f20316a42d2a6ffcaa245ef4de
Author: Brian Neradt <[email protected]>
AuthorDate: Thu May 30 16:39:34 2024 -0500
XPACK: update_maximum_size for storage (#11396)
This updates the XpackDynamicTableStorage memory allocated when
XpackDynamicTable's update_maximum_size is called. Without this
expansion, XpackDynamicTableStorage's memory is potentially overrun with
future insertions of header fields.
Fixes: #11388
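
For a concrete picture of the bug, the sketch below shows the sequence that could
previously overrun the storage: grow the table with update_maximum_size, then insert a
field larger than the original capacity. It is illustration only; the XpackDynamicTable
constructor argument and the include path are assumptions, while update_maximum_size and
insert_entry are the calls exercised by the diff.

    #include "proxy/hdrs/XPACK.h" // assumed include path
    #include <string>

    void
    sketch_regression_11388()
    {
      XpackDynamicTable dt{128};    // assumed: table starts with a small maximum size
      dt.update_maximum_size(4096); // grow the table's maximum size
      std::string big(1024, 'x');
      // Before this commit the underlying XpackDynamicTableStorage buffer was still
      // sized for the original capacity, so this insertion could write past the end of
      // it. With this change, the storage is expanded when the maximum size grows.
      dt.insert_entry(big, big);
    }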
---
include/proxy/hdrs/XPACK.h | 151 +++++++++++++++++++++++--
src/proxy/hdrs/XPACK.cc | 83 ++++++++++++--
src/proxy/hdrs/unit_tests/test_XPACK.cc | 193 ++++++++++++++++++++++++++++----
3 files changed, 385 insertions(+), 42 deletions(-)
diff --git a/include/proxy/hdrs/XPACK.h b/include/proxy/hdrs/XPACK.h
index e69445e80b..685dc6e7f7 100644
--- a/include/proxy/hdrs/XPACK.h
+++ b/include/proxy/hdrs/XPACK.h
@@ -51,21 +51,140 @@ struct XpackDynamicTableEntry {
const char *wks = nullptr;
};
+/** The memory containing the header fields. */
class XpackDynamicTableStorage
{
+ friend class ExpandCapacityContext;
+
public:
+ /** The storage for a dynamic table.
+ *
+ * @param[in] size The capacity of the table for header fields.
+ */
XpackDynamicTableStorage(uint32_t size);
~XpackDynamicTableStorage();
- void read(uint32_t offset, const char **name, uint32_t name_len, const char **value, uint32_t value_len) const;
+
+ /** Obtain the HTTP field name and value at @a offset bytes.
+ *
+ * @param[in] offset The offset from the start of the allocation from which to
+ * obtain the header field.
+ * @param[out] name A pointer to contain the name of the header field.
+ * @param[out] name_len The length of the name.
+ * @param[out] value A pointer to contain the value of the header field.
+ * @param[out] value_len The length of the value.
+ */
+ void read(uint32_t offset, const char **name, uint32_t name_len, const char **value, uint32_t value_len) const;
+
+ /** Write the HTTP field at the head of the allocated data.
+ *
+ * @param[in] name The HTTP field name to write.
+ * @param[in] name_len The length of the name.
+ * @param[in] value The HTTP field value to write.
+ * @param[in] value_len The length of the value.
+ *
+ * @return The offset from the start of the allocation where the header field
+ * was written.
+ */
uint32_t write(const char *name, uint32_t name_len, const char *value, uint32_t value_len);
- void erase(uint32_t name_len, uint32_t value_len);
+
+ /** The amount of written bytes.
+ *
+ * The amount of written, unerased data. This is the difference between @a
+ * _head and @a _tail.
+ *
+ * @return The number of written bytes.
+ */
+ uint32_t size() const;
private:
+ /** Start expanding the capacity.
+ *
+ * Expanding the capacity is a two step process in which @a
+ * _start_expanding_capacity is used to prepare for the expansion. This
+ * populates @a _old_data with a pointer to the current @a _data pointer and
+ * then allocates a new buffer per @a new_max_size and sets @a _data to that.
+ * The caller then reinserts the current headers into the new buffer. Once
+ * that is complete, the caller calls @a _finish_expanding_capacity to free
+ * the old buffer.
+ *
+ * Handling these two phases should only be done by ExpandCapacityContext,
+ * therefore these methods are private and only accessible to
+ * ExpandCapacityContext via a friend relationship.
+ *
+ * @note XpackDynamicTableStorage only supports expanding the buffer. This
+ * preserves offsets used by XpackDynamicTableEntry. Thus this function will
+ * only return true when @a new_max_size is greater than the current capacity.
+ *
+ * The caller will need to reinsert all header fields after expanding the
+ * capacity.
+ *
+ * @param[in] new_max_size The new maximum size of the table.
+ * @return true if the capacity was expanded, false otherwise.
+ */
+ bool _start_expanding_capacity(uint32_t new_max_size);
+
+ /** Finish expanding the capacity by freeing @a old_data. */
+ void _finish_expanding_capacity();
+
+private:
+ /** The amount of space above @a size allocated as a buffer. */
uint32_t _overwrite_threshold = 0;
- uint8_t *_data = nullptr;
- uint32_t _data_size = 0;
- uint32_t _head = 0;
- uint32_t _tail = 0;
+
+ /** The space allocated and populated for the header fields. */
+ uint8_t *_data = nullptr;
+
+ /** When in an expansion phase, this points to the old memory.
+ *
+ * See the documentation in @a _start_expanding_capacity.
+ */
+ uint8_t *_old_data = nullptr;
+
+ /** The size of allocated space for @a data.
+ *
+ * This is set to twice the requested space provided as @a size to the
+ * constructor. This is done to avoid buffer wrapping.
+ */
+ uint32_t _capacity = 0;
+
+ /** A pointer to the last byte written.
+ *
+ * @a _head is initialized to the last allocated byte. As header field data is
+ * populated in the allocated space, this is advanced to the last byte
+ * written. Thus the next write will start at the byte just after @a _head.
+ */
+ uint32_t _head = 0;
+};
+
+/** Define a context for expanding XpackDynamicTableStorage.
+ *
+ * Construction and destruction start the expansion and finish it, respectively.
+ */
+class ExpandCapacityContext
+{
+public:
+ /** Begin the storage expansion phase to the @a new_max_size. */
+ ExpandCapacityContext(XpackDynamicTableStorage &storage, uint32_t new_max_size) : _storage{storage}
+ {
+ this->_storage._start_expanding_capacity(new_max_size);
+ }
+ /** End the storage expansion phase, cleaning up the old storage memory. */
+ ~ExpandCapacityContext() { this->_storage._finish_expanding_capacity(); }
+
+ // No copying or moving.
+ ExpandCapacityContext(const ExpandCapacityContext &) = delete;
+ ExpandCapacityContext &operator=(const ExpandCapacityContext &) = delete;
+ ExpandCapacityContext(ExpandCapacityContext &&) = delete;
+ ExpandCapacityContext &operator=(ExpandCapacityContext &&) = delete;
+
+ /** Copy the field data from the old memory to the new one.
+ * @param[in] old_offset The offset of data in the old memory.
+ * @param[in] len The length of data to copy.
+ * @return The offset of the copied data in the new memory.
+ */
+ uint32_t copy_field(uint32_t old_offset, uint32_t len);
+
+private:
+ XpackDynamicTableStorage &_storage;
};
class XpackDynamicTable
@@ -107,14 +226,30 @@ private:
uint32_t _entries_tail = 0;
XpackDynamicTableStorage _storage;
+ /** Expand @a _storage to the new size.
+ *
+ * This takes care of expanding @a _storage's size and handles updating the
+ * new offsets for each entry that this expansion requires.
+ *
+ * @param[in] new_storage_size The new size to expand @a _storage to.
+ */
+ void _expand_storage_size(uint32_t new_storage_size);
+
/**
* The type of reuired_size is uint64 so that we can handle a size that is begger than the table capacity.
* Passing a value more than UINT32_MAX evicts every entry and return false.
*/
bool _make_space(uint64_t required_size);
- /**
- * Calcurates the index number for _entries, which is a kind of circular buffer.
+ /** Calcurates the index number for _entries, which is a kind of circular buffer.
+ *
+ * @param[in] base The place to start indexing from. Passing @a _tail
+ * references the start of the buffer, while @a _head references the end of
+ * the buffer.
+ *
+ * @param[in] offset The offset from the base. A value of 1 means the first
+ * entry from @a base. Thus a value of @a _tail for @a base and 1 for @a
+ * offset references the first entry in the buffer.
*/
uint32_t _calc_index(uint32_t base, int64_t offset) const;
};
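
To make the two-phase expansion contract above concrete, here is a sketch of the RAII
pattern a caller is expected to follow, assuming the classes declared above are visible.
The Entry struct and the entries vector are placeholders standing in for
XpackDynamicTable's own bookkeeping; the real caller is
XpackDynamicTable::_expand_storage_size in the XPACK.cc diff below.

    #include <cstdint>
    #include <vector>

    struct Entry {
      uint32_t offset;    // offset of the field within the storage
      uint32_t name_len;  // length of the field name
      uint32_t value_len; // length of the field value
    };

    void
    sketch_expand(XpackDynamicTableStorage &storage, std::vector<Entry> &entries, uint32_t new_max_size)
    {
      // Constructing the context allocates the larger buffer and stashes the old one.
      ExpandCapacityContext context{storage, new_max_size};
      for (auto &entry : entries) {
        // Re-home each field into the new buffer and record its new offset.
        entry.offset = context.copy_field(entry.offset, entry.name_len + entry.value_len);
      }
      // The context destructor frees the old buffer.
    }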
diff --git a/src/proxy/hdrs/XPACK.cc b/src/proxy/hdrs/XPACK.cc
index a218e9f79f..9eb83dde32 100644
--- a/src/proxy/hdrs/XPACK.cc
+++ b/src/proxy/hdrs/XPACK.cc
@@ -29,6 +29,7 @@
#include "tscore/Diags.h"
#include "tscore/ink_memory.h"
#include "tsutil/LocalBuffer.h"
+#include <cstdint>
namespace
{
@@ -412,11 +413,15 @@ XpackDynamicTable::update_maximum_size(uint32_t new_max_size)
if (used < new_max_size) {
this->_maximum_size = new_max_size;
this->_available = new_max_size - used;
+ this->_expand_storage_size(new_max_size);
return true;
}
+ // used is larger than or equal to new_max_size. This means that _maximum_size
+ // is shrinking and we need to evict entries to get the used space below
+ // new_max_size.
bool ret = this->_make_space(used - new_max_size);
- // Size update must suceeds
+ // Size update must succeed.
if (ret) {
this->_available = new_max_size - (this->_maximum_size - this->_available);
this->_maximum_size = new_max_size;
@@ -476,6 +481,18 @@ XpackDynamicTable::count() const
}
}
+void
+XpackDynamicTable::_expand_storage_size(uint32_t new_storage_size)
+{
+ ExpandCapacityContext context{this->_storage, new_storage_size};
+ uint32_t i = this->_calc_index(this->_entries_tail, 1);
+ uint32_t end = this->_calc_index(this->_entries_head, 1);
+ for (; i != end; i = this->_calc_index(i, 1)) {
+ auto &entry = this->_entries[i];
+ entry.offset = context.copy_field(entry.offset, entry.name_len + entry.value_len);
+ }
+}
+
bool
XpackDynamicTable::_make_space(uint64_t required_size)
{
@@ -523,11 +540,10 @@ XpackDynamicTable::_calc_index(uint32_t base, int64_t offset) const
// DynamicTableStorage
//
-XpackDynamicTableStorage::XpackDynamicTableStorage(uint32_t size) : _head(size * 2 - 1), _tail(size * 2 - 1)
+XpackDynamicTableStorage::XpackDynamicTableStorage(uint32_t size)
+ : _overwrite_threshold(size), _capacity{size * 2}, _head{_capacity - 1}
{
- this->_data_size = size * 2;
- this->_data = reinterpret_cast<uint8_t *>(ats_malloc(this->_data_size));
- this->_overwrite_threshold = size;
+ this->_data = reinterpret_cast<uint8_t *>(ats_malloc(this->_capacity));
}
XpackDynamicTableStorage::~XpackDynamicTableStorage()
@@ -546,20 +562,63 @@ XpackDynamicTableStorage::read(uint32_t offset, const char **name, uint32_t name
uint32_t
XpackDynamicTableStorage::write(const char *name, uint32_t name_len, const char *value, uint32_t value_len)
{
- uint32_t offset = (this->_head + 1) % this->_data_size;
- memcpy(this->_data + offset, name, name_len);
- memcpy(this->_data + offset + name_len, value, value_len);
+ // insert_entry should guard against buffer overrun, but rather than overrun
+ // our buffer we assert here in case something horrible went wrong.
+ ink_release_assert(name_len + value_len <= this->_capacity);
+ ink_release_assert(this->_head == this->_capacity - 1 || this->_head + name_len + value_len <= this->_capacity);
- this->_head = (this->_head + (name_len + value_len)) % this->_data_size;
+ uint32_t offset = (this->_head + 1) % this->_capacity;
+ if (name_len > 0) {
+ memcpy(this->_data + offset, name, name_len);
+ }
+ if (value_len > 0) {
+ memcpy(this->_data + offset + name_len, value, value_len);
+ }
+
+ this->_head = (this->_head + (name_len + value_len)) % this->_capacity;
if (this->_head > this->_overwrite_threshold) {
- this->_head = 0;
+ // This is how we wrap back around to the beginning of the buffer.
+ this->_head = this->_capacity - 1;
}
return offset;
}
+bool
+XpackDynamicTableStorage::_start_expanding_capacity(uint32_t new_maximum_size)
+{
+ if ((new_maximum_size * 2) <= this->_capacity) {
+ // We never shrink our memory for the data storage because we don't support
+ // arbitrary eviction from it via XpackDynamicTableStorage. The
+ // XpackDynamicTable class keeps track of field sizes and therefore can
+ // evict properly. Also, we don't want to invalidate XpackDynamicTable's
+ // offsets into the storage.
+ return false;
+ }
+ this->_capacity = new_maximum_size * 2;
+
+ this->_old_data = this->_data;
+ this->_data = reinterpret_cast<uint8_t *>(ats_malloc(this->_capacity));
+ if (this->_data == nullptr) {
+ // Realloc failed. We're out of memory and ATS is in trouble.
+ return false;
+ }
+
+ this->_overwrite_threshold = new_maximum_size;
+ this->_head = this->_capacity - 1;
+ return true;
+}
+
void
-XpackDynamicTableStorage::erase(uint32_t name_len, uint32_t value_len)
+XpackDynamicTableStorage::_finish_expanding_capacity()
+{
+ ats_free(this->_old_data);
+ this->_old_data = nullptr;
+}
+
+uint32_t
+ExpandCapacityContext::copy_field(uint32_t offset, uint32_t len)
{
- this->_tail = (this->_tail + (name_len + value_len)) % this->_data_size;
+ char const *field = reinterpret_cast<char const *>(this->_storage._old_data + offset);
+ return this->_storage.write(field, len, nullptr, 0);
}
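
For readers tracing the offset arithmetic in the new write() above, the following
standalone sketch reproduces just that arithmetic, using the constants the constructor
derives from size = 100 (capacity = 200, overwrite threshold = 100, head starting at
199). It is illustration only, but it yields the offsets 0, 50, 100, and then 0 again
that the new XpackDynamicTableStorage unit test below asserts.

    #include <cstdint>
    #include <cstdio>

    int
    main()
    {
      uint32_t const capacity  = 200; // size * 2
      uint32_t const threshold = 100; // size
      uint32_t       head      = capacity - 1;
      for (int i = 0; i < 4; ++i) {
        uint32_t const len    = 50;                    // 25-byte name + 25-byte value
        uint32_t const offset = (head + 1) % capacity; // where this field is written
        head                  = (head + len) % capacity;
        if (head > threshold) {
          head = capacity - 1; // wrap back so the next write starts at offset 0
        }
        std::printf("write %d -> offset %u\n", i + 1, offset);
      }
      return 0;
    }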
diff --git a/src/proxy/hdrs/unit_tests/test_XPACK.cc b/src/proxy/hdrs/unit_tests/test_XPACK.cc
index f2220371f4..8e8258bc5b 100644
--- a/src/proxy/hdrs/unit_tests/test_XPACK.cc
+++ b/src/proxy/hdrs/unit_tests/test_XPACK.cc
@@ -21,6 +21,8 @@
limitations under the License.
*/
+#include <string>
+#include <string_view>
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
@@ -30,6 +32,17 @@
static constexpr int BUFSIZE_FOR_REGRESSION_TEST = 128;
+std::string
+get_long_string(int size)
+{
+ std::string s(size, '0');
+ auto p = s.data();
+ for (int i = 0; i < size; ++i) {
+ p[i] = '0' + (i % 10);
+ }
+ return s;
+}
+
TEST_CASE("XPACK_Integer", "[xpack]")
{
// [RFC 7541] C.1. Integer Representation Examples
@@ -262,17 +275,18 @@ TEST_CASE("XPACK_String", "[xpack]")
REQUIRE(memcmp(value, "value2", value_len) == 0);
// Insert one more entry (this should evict all existing entries)
- dt.insert_entry("name4-1234567890123456789012345",
"value4-9876543210987654321098765");
- REQUIRE(dt.size() == strlen("name4-1234567890123456789012345") + strlen("value4-9876543210987654321098765") + 32);
+ std::string field_4 = get_long_string(50);
+ dt.insert_entry(field_4, field_4); // 100 bytes. _head should now be 0.
+ REQUIRE(dt.size() == 2 * field_4.length() + 32);
REQUIRE(dt.maximum_size() == MAX_SIZE);
REQUIRE(dt.count() == 1);
REQUIRE(dt.largest_index() == 3);
result = dt.lookup(3, &name, &name_len, &value, &value_len);
REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
- REQUIRE(name_len == strlen("name4-1234567890123456789012345"));
- REQUIRE(memcmp(name, "name4-1234567890123456789012345", name_len) == 0);
- REQUIRE(value_len == strlen("value4-9876543210987654321098765"));
- REQUIRE(memcmp(value, "value4-9876543210987654321098765", value_len) == 0);
+ REQUIRE(name_len == field_4.length());
+ REQUIRE(memcmp(name, field_4.data(), name_len) == 0);
+ REQUIRE(value_len == field_4.length());
+ REQUIRE(memcmp(value, field_4.data(), value_len) == 0);
result = dt.lookup(dt.largest_index() - 1, &name, &name_len, &value, &value_len);
REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
result = dt.lookup(dt.largest_index(), &name, &name_len, &value, &value_len);
@@ -280,7 +294,7 @@ TEST_CASE("XPACK_String", "[xpack]")
result = dt.lookup(dt.largest_index() + 1, &name, &name_len, &value, &value_len);
REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
- // Update the maximum size (this should not evict anything)
+ // Update the maximum size to the current used size (this should not evict anything).
size_t current_size = dt.size();
dt.update_maximum_size(current_size);
REQUIRE(dt.size() == current_size);
@@ -289,35 +303,170 @@ TEST_CASE("XPACK_String", "[xpack]")
REQUIRE(dt.largest_index() == 3);
result = dt.lookup(3, &name, &name_len, &value, &value_len);
REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
- REQUIRE(name_len == strlen("name4-1234567890123456789012345"));
- REQUIRE(memcmp(name, "name4-1234567890123456789012345", name_len) == 0);
- REQUIRE(value_len == strlen("value4-9876543210987654321098765"));
- REQUIRE(memcmp(value, "value4-9876543210987654321098765", value_len) == 0);
+ REQUIRE(name_len == field_4.length());
+ REQUIRE(memcmp(name, field_4.data(), name_len) == 0);
+ REQUIRE(value_len == field_4.length());
+ REQUIRE(memcmp(value, field_4.data(), value_len) == 0);
+
+ // Expand the maximum size (this should not evict anything).
+ constexpr uint16_t LARGER_MAX_SIZE = 4096;
+ dt.update_maximum_size(LARGER_MAX_SIZE);
+ REQUIRE(dt.size() == current_size);
+ REQUIRE(dt.maximum_size() == LARGER_MAX_SIZE);
+ REQUIRE(dt.count() == 1);
+ // Note that largest_index must always be preserved across all resizes and evictions.
+ REQUIRE(dt.largest_index() == 3);
+ result = dt.lookup(dt.largest_index(), &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
+ REQUIRE(name_len == field_4.length());
+ REQUIRE(memcmp(name, field_4.data(), name_len) == 0);
+ REQUIRE(value_len == field_4.length());
+ REQUIRE(memcmp(value, field_4.data(), value_len) == 0);
+
+ // Add a new entry and make sure the existing valid entry is not overwritten.
+ std::string field_5 = get_long_string(100);
+ dt.insert_entry(field_5, field_5);
+ REQUIRE(dt.count() == 2);
+ REQUIRE(dt.size() == 2 * field_4.length() + 32 + 2 * field_5.length() + 32);
+ result = dt.lookup(dt.largest_index() - 1, &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
+ REQUIRE(name_len == field_4.length());
+ REQUIRE(memcmp(name, field_4.data(), name_len) == 0);
+ REQUIRE(value_len == field_4.length());
+ REQUIRE(memcmp(value, field_4.data(), value_len) == 0);
+ result = dt.lookup(dt.largest_index(), &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
+ REQUIRE(name_len == field_5.length());
+ REQUIRE(memcmp(name, field_5.data(), name_len) == 0);
+ REQUIRE(value_len == field_5.length());
+ REQUIRE(memcmp(value, field_5.data(), value_len) == 0);
- // Update the maximum size (this should evict everything)
+ // Update (shrink) the maximum size to 0 (this should evict everything)
+ auto const previous_largest_index = dt.largest_index();
dt.update_maximum_size(0);
REQUIRE(dt.size() == 0);
REQUIRE(dt.maximum_size() == 0);
REQUIRE(dt.is_empty());
REQUIRE(dt.count() == 0);
- result = dt.lookup(1, &name, &name_len, &value, &value_len);
- REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
- result = dt.lookup(2, &name, &name_len, &value, &value_len);
- REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
+ for (auto i = 0u; i <= previous_largest_index; ++i) {
+ result = dt.lookup(i, &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
+ }
- // Reset the maximum size
- dt.update_maximum_size(4096);
+ // Update the maximum size to a new value.
+ dt.update_maximum_size(LARGER_MAX_SIZE);
REQUIRE(dt.size() == 0);
- REQUIRE(dt.maximum_size() == 4096);
+ REQUIRE(dt.maximum_size() == LARGER_MAX_SIZE);
REQUIRE(dt.is_empty());
REQUIRE(dt.count() == 0);
- // Insert an oversided item
- dt.insert_entry("name1", "value1"); // This should be evicted
- dt.insert_entry("", UINT32_MAX, "", UINT32_MAX); // This should not even
cause a buffer over run
+ // Insert a new item.
+ dt.insert_entry("name1", "value1");
+ REQUIRE(dt.maximum_size() == LARGER_MAX_SIZE);
+ REQUIRE(dt.count() == 1);
+ // Note that indexing will continue from the last index, despite eviction.
+ REQUIRE(dt.largest_index() == 5);
+ // The old index values should not match.
+ result = dt.lookup(dt.largest_index() - 1, &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::NONE);
+ // The last inserted item should still exist though.
+ result = dt.lookup(dt.largest_index(), &name, &name_len, &value, &value_len);
+ REQUIRE(result.match_type == XpackLookupResult::MatchType::EXACT);
+ REQUIRE(name_len == strlen("name1"));
+ REQUIRE(memcmp(name, "name1", name_len) == 0);
+ REQUIRE(value_len == strlen("value1"));
+ REQUIRE(memcmp(value, "value1", value_len) == 0);
+
+ // Insert an oversized item. The previous item should be evicted.
+ dt.insert_entry("", UINT32_MAX, "", UINT32_MAX); // This should not cause
a buffer over run
REQUIRE(dt.size() == 0);
REQUIRE(dt.maximum_size() == 4096);
REQUIRE(dt.is_empty());
REQUIRE(dt.count() == 0);
}
}
+
+// Return a 110 character string.
+std::string
+get_long_string(std::string_view prefix)
+{
+ return std::string(prefix) + std::string("0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789"
+ "0123456789");
+}
+
+TEST_CASE("XpackDynamicTableStorage", "[xpack]")
+{
+ constexpr uint16_t MAX_SIZE = 100;
+ XpackDynamicTableStorage storage{MAX_SIZE};
+
+ // First write.
+ auto const name1 = get_long_string("name1");
+ auto const value1 = get_long_string("value1");
+ auto const offset1 = storage.write(name1.data(), 25, value1.data(), 25);
+ REQUIRE(offset1 == 0);
+ char const *name = nullptr;
+ char const *value = nullptr;
+ storage.read(offset1, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name1.data(), 25) == 0);
+ REQUIRE(memcmp(value, value1.data(), 25) == 0);
+
+ // Second write.
+ auto const name2 = get_long_string("name2");
+ auto const value2 = get_long_string("value2");
+ auto const offset2 = storage.write(name2.data(), 25, value2.data(), 25);
+ REQUIRE(offset2 == 50);
+ storage.read(offset2, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name2.data(), 25) == 0);
+ REQUIRE(memcmp(value, value2.data(), 25) == 0);
+
+ // Third write - exceed size and enter into the overwrite threshold.
+ auto const name3 = get_long_string("name3");
+ auto const value3 = get_long_string("value3");
+ auto const offset3 = storage.write(name3.data(), 25, value3.data(), 25);
+ REQUIRE(offset3 == 100);
+ storage.read(offset3, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name3.data(), 25) == 0);
+ REQUIRE(memcmp(value, value3.data(), 25) == 0);
+
+ // Note that the offset will now wrap back around to 0 since we've exceeded MAX_SIZE.
+ auto const name4 = get_long_string("name4");
+ auto const value4 = get_long_string("value4");
+ auto const offset4 = storage.write(name4.data(), 25, value4.data(), 25);
+ REQUIRE(offset4 == 0);
+ storage.read(offset4, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name4.data(), 25) == 0);
+ REQUIRE(memcmp(value, value4.data(), 25) == 0);
+
+ // Test expanding capacity. Note that we start at offset2 since the data at
+ // offset1 will be overwritten.
+ uint32_t reoffset2 = 0, reoffset3 = 0, reoffset4 = 0;
+ {
+ ExpandCapacityContext context{storage, 200};
+ reoffset2 = context.copy_field(offset2, 50);
+ // Note that the offsets will now be shifted, starting from 0 now.
+ REQUIRE(reoffset2 == 0);
+ reoffset3 = context.copy_field(offset3, 50);
+ REQUIRE(reoffset3 == 50);
+ reoffset4 = context.copy_field(offset4, 50);
+ REQUIRE(reoffset4 == 100);
+ } // context goes out of scope and finishes the expansion phase.
+
+ storage.read(reoffset2, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name2.data(), 25) == 0);
+ REQUIRE(memcmp(value, value2.data(), 25) == 0);
+ storage.read(reoffset3, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name3.data(), 25) == 0);
+ REQUIRE(memcmp(value, value3.data(), 25) == 0);
+ storage.read(reoffset4, &name, 25, &value, 25);
+ REQUIRE(memcmp(name, name4.data(), 25) == 0);
+ REQUIRE(memcmp(value, value4.data(), 25) == 0);
+}