kou commented on code in PR #43096:
URL: https://github.com/apache/arrow/pull/43096#discussion_r1683650975
##########
cpp/src/arrow/filesystem/azurefs.cc:
##########
@@ -1066,20 +1098,88 @@ class ObjectAppendStream final : public io::OutputStream {
// flush. This also avoids some unhandled errors when flushing in the destructor.
return Status::OK();
}
- return CommitBlockList(block_blob_client_, block_ids_, commit_block_list_options_);
+
+ auto fut = FlushAsync();
+ RETURN_NOT_OK(fut.status());
+
+ return CommitBlockList();
+ }
+
+ Future<> FlushAsync() {
+ RETURN_NOT_OK(CheckClosed("flush"));
Review Comment:
Can we move
https://github.com/apache/arrow/blob/0bae073d1abe439d113cc12e06e7ada886a2f2fd/cpp/src/arrow/filesystem/azurefs.cc#L1063-L1067
from `Flush()`?
It seems that we don't need to execute `CheckClosed("flush")` and the `if (!initialized_)` check in both `Flush()` and `FlushAsync()`.
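For illustration, one possible shape of this (just a sketch; it assumes `CommitBlockList()` is a no-op when nothing was staged, otherwise `Flush()` would still need its own `initialized_` guard):
```cpp
Status Flush() {
  auto fut = FlushAsync();
  RETURN_NOT_OK(fut.status());
  return CommitBlockList();
}

Future<> FlushAsync() {
  RETURN_NOT_OK(CheckClosed("flush"));
  if (!initialized_) {
    // Nothing has been uploaded yet, so there is nothing to flush. This also
    // avoids some unhandled errors when flushing in the destructor.
    return Future<>::MakeFinished(Status::OK());
  }
  // Wait for background writes to finish.
  std::unique_lock<std::mutex> lock(upload_state_->mutex);
  return upload_state_->pending_blocks_completed;
}
```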
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
+ std::array<std::string, 3> buffers{
+ std::string(sizes[0], 'A'),
+ std::string(sizes[1], 'B'),
+ std::string(sizes[2], 'C'),
+ };
+ auto expected = std::int64_t{0};
Review Comment:
```suggestion
auto expected_size = std::int64_t{0};
```
##########
cpp/src/arrow/filesystem/azurefs.cc:
##########
@@ -1066,20 +1098,88 @@ class ObjectAppendStream final : public io::OutputStream {
// flush. This also avoids some unhandled errors when flushing in the destructor.
return Status::OK();
}
- return CommitBlockList(block_blob_client_, block_ids_, commit_block_list_options_);
+
+ auto fut = FlushAsync();
+ RETURN_NOT_OK(fut.status());
+
+ return CommitBlockList();
+ }
+
+ Future<> FlushAsync() {
+ RETURN_NOT_OK(CheckClosed("flush"));
+
+ // Wait for background writes to finish
+ std::unique_lock<std::mutex> lock(upload_state_->mutex);
+ return upload_state_->pending_blocks_completed;
}
private:
- Status DoAppend(const void* data, int64_t nbytes,
- std::shared_ptr<Buffer> owned_buffer = nullptr) {
- RETURN_NOT_OK(CheckClosed("append"));
- auto append_data = reinterpret_cast<const uint8_t*>(data);
- Core::IO::MemoryBodyStream block_content(append_data, nbytes);
- if (block_content.Length() == 0) {
- return Status::OK();
+ Status WriteBuffer() {
+ ARROW_ASSIGN_OR_RAISE(auto buf, current_block_->Finish());
+ current_block_.reset();
+ current_block_size_ = 0;
+ return AppendBlock(buf);
+ }
+
+ Status DoWrite(const void* data, int64_t nbytes,
+ std::shared_ptr<Buffer> owned_buffer = nullptr) {
+ if (closed_) {
+ return Status::Invalid("Operation on closed stream");
+ }
+
+ const auto* data_ptr = reinterpret_cast<const int8_t*>(data);
+ auto advance_ptr = [&data_ptr, &nbytes](const int64_t offset) {
+ data_ptr += offset;
+ nbytes -= offset;
+ };
+
+ // Handle case where we have some bytes buffered from prior calls.
+ if (current_block_size_ > 0) {
+ // Try to fill current buffer
+ const int64_t to_copy = std::min(nbytes, kBlockUploadSize - current_block_size_);
+ RETURN_NOT_OK(current_block_->Write(data_ptr, to_copy));
+ current_block_size_ += to_copy;
+ advance_ptr(to_copy);
+ pos_ += to_copy;
+ content_length_ += to_copy;
+
+ // If buffer isn't full, break
+ if (current_block_size_ < kBlockUploadSize) {
+ return Status::OK();
+ }
+
+ // Upload current buffer
+ RETURN_NOT_OK(WriteBuffer());
+ }
+
+ // We can upload chunks without copying them into a buffer
+ while (nbytes >= kBlockUploadSize) {
+ RETURN_NOT_OK(AppendBlock(data_ptr, kBlockUploadSize));
+ advance_ptr(kBlockUploadSize);
+ pos_ += kBlockUploadSize;
+ content_length_ += kBlockUploadSize;
+ }
+
+ // Buffer remaining bytes
+ if (nbytes > 0) {
+ current_block_size_ = nbytes;
+ ARROW_ASSIGN_OR_RAISE(current_block_, io::BufferOutputStream::Create(kBlockUploadSize, io_context_.pool()));
Review Comment:
Can we reuse `Reset()`-ed `current_block_` instead of creating a new one?
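For illustration, a sketch of what reuse could look like (assuming `io::BufferOutputStream::Reset()` re-opens the stream with a fresh buffer after `Finish()`, and that `WriteBuffer()` keeps the `current_block_` pointer alive instead of resetting it):
```cpp
if (current_block_ == nullptr) {
  // First block: allocate the stream once.
  ARROW_ASSIGN_OR_RAISE(current_block_, io::BufferOutputStream::Create(
                                            kBlockUploadSize, io_context_.pool()));
} else {
  // Later blocks: reuse the stream object with a newly allocated buffer of the
  // same capacity.
  RETURN_NOT_OK(current_block_->Reset(kBlockUploadSize, io_context_.pool()));
}
RETURN_NOT_OK(current_block_->Write(data_ptr, nbytes));
current_block_size_ = nbytes;
```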
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
Review Comment:
Why do we need this?
##########
cpp/src/arrow/filesystem/azurefs.cc:
##########
@@ -1066,20 +1098,88 @@ class ObjectAppendStream final : public io::OutputStream {
// flush. This also avoids some unhandled errors when flushing in the destructor.
return Status::OK();
}
- return CommitBlockList(block_blob_client_, block_ids_, commit_block_list_options_);
+
+ auto fut = FlushAsync();
+ RETURN_NOT_OK(fut.status());
+
+ return CommitBlockList();
+ }
+
+ Future<> FlushAsync() {
+ RETURN_NOT_OK(CheckClosed("flush"));
+
+ // Wait for background writes to finish
+ std::unique_lock<std::mutex> lock(upload_state_->mutex);
+ return upload_state_->pending_blocks_completed;
}
private:
- Status DoAppend(const void* data, int64_t nbytes,
- std::shared_ptr<Buffer> owned_buffer = nullptr) {
- RETURN_NOT_OK(CheckClosed("append"));
- auto append_data = reinterpret_cast<const uint8_t*>(data);
- Core::IO::MemoryBodyStream block_content(append_data, nbytes);
- if (block_content.Length() == 0) {
- return Status::OK();
+ Status WriteBuffer() {
+ ARROW_ASSIGN_OR_RAISE(auto buf, current_block_->Finish());
+ current_block_.reset();
+ current_block_size_ = 0;
+ return AppendBlock(buf);
+ }
+
+ Status DoWrite(const void* data, int64_t nbytes,
+ std::shared_ptr<Buffer> owned_buffer = nullptr) {
+ if (closed_) {
+ return Status::Invalid("Operation on closed stream");
+ }
+
+ const auto* data_ptr = reinterpret_cast<const int8_t*>(data);
+ auto advance_ptr = [&data_ptr, &nbytes](const int64_t offset) {
+ data_ptr += offset;
+ nbytes -= offset;
+ };
Review Comment:
How about updating `pos_` and `content_length_` in this lambda too, and improving the variable name to reflect that change?
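For illustration (the name is only a placeholder):
```cpp
// Advance through the input and keep the stream's bookkeeping in sync, so the
// callers below don't have to update pos_ and content_length_ themselves.
auto consume_bytes = [&](const int64_t consumed) {
  data_ptr += consumed;
  nbytes -= consumed;
  pos_ += consumed;
  content_length_ += consumed;
};
```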
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
Review Comment:
Could you add a comment explaining why these sizes are used?
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
+ std::array<std::string, 3> buffers{
+ std::string(sizes[0], 'A'),
+ std::string(sizes[1], 'B'),
+ std::string(sizes[2], 'C'),
+ };
+ auto expected = std::int64_t{0};
+ for (auto i = 0; i != 3; ++i) {
+ ASSERT_OK(output->Write(buffers[i]));
+ expected += sizes[i];
+ ASSERT_EQ(expected, output->Tell());
+ }
+ ASSERT_OK(output->Close());
+
+ AssertObjectContents(fs.get(), path, buffers[0] + buffers[1] + buffers[2]);
+ }
+
+ void TestOpenOutputStreamCloseAsyncDestructor() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+ std::shared_ptr<io::OutputStream> stream;
+ auto data = SetUpPreexistingData();
+ const std::string path = data.ContainerPath("test-write-object");
+ constexpr auto payload = PreexistingData::kLoremIpsum;
+
+ ASSERT_OK_AND_ASSIGN(stream, fs->OpenOutputStream(path));
+ ASSERT_OK(stream->Write(payload));
+ // Destructor implicitly closes stream and completes the multipart upload.
+ // GH-37670: Testing it doesn't matter whether flush is triggered asynchronously
Review Comment:
It seems that GH-37670 is about the S3 filesystem.
Is this true for the Azure filesystem too?
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
+ std::array<std::string, 3> buffers{
+ std::string(sizes[0], 'A'),
+ std::string(sizes[1], 'B'),
+ std::string(sizes[2], 'C'),
+ };
+ auto expected = std::int64_t{0};
+ for (auto i = 0; i != 3; ++i) {
+ ASSERT_OK(output->Write(buffers[i]));
+ expected += sizes[i];
+ ASSERT_EQ(expected, output->Tell());
+ }
+ ASSERT_OK(output->Close());
+
+ AssertObjectContents(fs.get(), path, buffers[0] + buffers[1] + buffers[2]);
+ }
+
+ void TestOpenOutputStreamCloseAsyncDestructor() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+ std::shared_ptr<io::OutputStream> stream;
+ auto data = SetUpPreexistingData();
+ const std::string path = data.ContainerPath("test-write-object");
+ constexpr auto payload = PreexistingData::kLoremIpsum;
+
+ ASSERT_OK_AND_ASSIGN(stream, fs->OpenOutputStream(path));
+ ASSERT_OK(stream->Write(payload));
+ // Destructor implicitly closes stream and completes the multipart upload.
+ // GH-37670: Testing it doesn't matter whether flush is triggered asynchronously
+ // after CloseAsync or synchronously after stream.reset() since we're just
+ // checking that `closeAsyncFut` keeps the stream alive until completion
+ // rather than segfaulting on a dangling stream
+ auto close_fut = stream->CloseAsync();
+ stream.reset();
+ ASSERT_OK(close_fut.MoveResult());
+
+ AssertObjectContents(fs.get(), path, payload);
+ }
+
+ void TestOpenOutputStreamDestructor() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+ std::shared_ptr<io::OutputStream> stream;
+ constexpr auto* payload = "new data";
+ auto data = SetUpPreexistingData();
+ const std::string path = data.ContainerPath("test-write-object");
+
+ ASSERT_OK_AND_ASSIGN(stream, fs->OpenOutputStream(path));
Review Comment:
```suggestion
constexpr auto* payload = "new data";
auto data = SetUpPreexistingData();
const std::string path = data.ContainerPath("test-write-object");
ASSERT_OK_AND_ASSIGN(auto stream, fs->OpenOutputStream(path));
```
##########
cpp/src/arrow/filesystem/azurefs.h:
##########
@@ -112,6 +112,9 @@ struct ARROW_EXPORT AzureOptions {
/// This will be ignored if non-empty metadata is passed to OpenOutputStream.
std::shared_ptr<const KeyValueMetadata> default_metadata;
+ /// Whether OutputStream writes will be issued in the background, without blocking.
+ bool background_writes = true;
Review Comment:
How about referring to `ARROW_ENABLE_THREADING` here?
```suggestion
#ifdef ARROW_ENABLE_THREADING
bool background_writes = true;
#else
bool background_writes = false;
#endif
```
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
+ std::array<std::string, 3> buffers{
+ std::string(sizes[0], 'A'),
+ std::string(sizes[1], 'B'),
+ std::string(sizes[2], 'C'),
+ };
+ auto expected = std::int64_t{0};
+ for (auto i = 0; i != 3; ++i) {
Review Comment:
Can we use `i < buffers.size()` here?
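For example (with `i` as `std::size_t` to match `buffers.size()`):
```cpp
for (std::size_t i = 0; i < buffers.size(); ++i) {
  ASSERT_OK(output->Write(buffers[i]));
  expected += sizes[i];
  ASSERT_EQ(expected, output->Tell());
}
```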
##########
cpp/src/arrow/filesystem/azurefs_test.cc:
##########
@@ -1471,6 +1473,93 @@ class TestAzureFileSystem : public ::testing::Test {
arrow::fs::AssertFileInfo(fs(), data.Path("dir/file0"), FileType::File);
}
+ void AssertObjectContents(AzureFileSystem* fs, std::string_view path,
+ std::string_view expected) {
+ ASSERT_OK_AND_ASSIGN(auto input, fs->OpenInputStream(std::string{path}));
+ std::string contents;
+ std::shared_ptr<Buffer> buffer;
+ do {
+ ASSERT_OK_AND_ASSIGN(buffer, input->Read(128 * 1024));
+ ASSERT_TRUE(buffer);
+ contents.append(buffer->ToString());
+ } while (buffer->size() != 0);
+
+ EXPECT_EQ(expected, contents);
+ }
+
+ void TestOpenOutputStreamSmall() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ const std::string_view expected(PreexistingData::kLoremIpsum);
+ ASSERT_OK(output->Write(expected));
+ ASSERT_OK(output->Close());
+
+ // Verify we can read the object back.
+ AssertObjectContents(fs.get(), path, expected);
+ }
+
+ void TestOpenOutputStreamLarge() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+
+ auto data = SetUpPreexistingData();
+ const auto path = data.ContainerPath("test-write-object");
+ ASSERT_OK_AND_ASSIGN(auto output, fs->OpenOutputStream(path, {}));
+ std::array<std::int64_t, 3> sizes{2570 * 1024, 258 * 1024, 259 * 1024};
+ std::array<std::string, 3> buffers{
+ std::string(sizes[0], 'A'),
+ std::string(sizes[1], 'B'),
+ std::string(sizes[2], 'C'),
+ };
+ auto expected = std::int64_t{0};
+ for (auto i = 0; i != 3; ++i) {
+ ASSERT_OK(output->Write(buffers[i]));
+ expected += sizes[i];
+ ASSERT_EQ(expected, output->Tell());
+ }
+ ASSERT_OK(output->Close());
+
+ AssertObjectContents(fs.get(), path, buffers[0] + buffers[1] + buffers[2]);
+ }
+
+ void TestOpenOutputStreamCloseAsyncDestructor() {
+ ASSERT_OK_AND_ASSIGN(auto fs, AzureFileSystem::Make(options_));
+ std::shared_ptr<io::OutputStream> stream;
+ auto data = SetUpPreexistingData();
+ const std::string path = data.ContainerPath("test-write-object");
+ constexpr auto payload = PreexistingData::kLoremIpsum;
+
+ ASSERT_OK_AND_ASSIGN(stream, fs->OpenOutputStream(path));
Review Comment:
```suggestion
auto data = SetUpPreexistingData();
const std::string path = data.ContainerPath("test-write-object");
constexpr auto payload = PreexistingData::kLoremIpsum;
ASSERT_OK_AND_ASSIGN(auto stream, fs->OpenOutputStream(path));
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]