[
https://issues.apache.org/jira/browse/PARQUET-1360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16561304#comment-16561304
]
ASF GitHub Bot commented on PARQUET-1360:
----------------------------------------
wesm closed pull request #482: PARQUET-1360: Use conforming API style, variable
names in WriteFileMetaData functions
URL: https://github.com/apache/parquet-cpp/pull/482
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b53f5980..927f7289 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -84,7 +84,7 @@ enable_testing()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake_modules")
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")
-set(CLANG_FORMAT_VERSION "5.0")
+set(CLANG_FORMAT_VERSION "6.0")
find_package(ClangTools)
if ("$ENV{CMAKE_EXPORT_COMPILE_COMMANDS}" STREQUAL "1" OR CLANG_TIDY_FOUND)
# Generate a Clang compile_commands.json "compilation database" file for use
diff --git a/src/parquet/arrow/arrow-reader-writer-test.cc
b/src/parquet/arrow/arrow-reader-writer-test.cc
index 02b8d528..d4f5b000 100644
--- a/src/parquet/arrow/arrow-reader-writer-test.cc
+++ b/src/parquet/arrow/arrow-reader-writer-test.cc
@@ -52,16 +52,16 @@ using arrow::Buffer;
using arrow::ChunkedArray;
using arrow::Column;
using arrow::DataType;
+using arrow::default_memory_pool;
using arrow::ListArray;
-using arrow::ResizableBuffer;
using arrow::PrimitiveArray;
+using arrow::ResizableBuffer;
using arrow::Status;
using arrow::Table;
using arrow::TimeUnit;
using arrow::compute::Datum;
using arrow::compute::DictionaryEncode;
using arrow::compute::FunctionContext;
-using arrow::default_memory_pool;
using arrow::io::BufferReader;
using arrow::test::randint;
@@ -861,26 +861,21 @@ TYPED_TEST(TestParquetIO, FileMetaDataWrite) {
std::unique_ptr reader;
ASSERT_NO_FATAL_FAILURE(this->ReaderFromSink());
- const std::shared_ptr fileMetaData =
reader->parquet_reader()->metadata();
- ASSERT_EQ(1, fileMetaData->num_columns());
- ASSERT_EQ(100, fileMetaData->num_rows());
+ auto metadata = reader->parquet_reader()->metadata();
+ ASSERT_EQ(1, metadata->num_columns());
+ ASSERT_EQ(100, metadata->num_rows());
this->sink_ = std::make_shared();
- std::unique_ptr uniqueFileMetaData(fileMetaData.get());
-
- ASSERT_OK_NO_THROW(FileWriter::WriteMetaData(uniqueFileMetaData,
this->sink_));
+ ASSERT_OK_NO_THROW(::parquet::arrow::WriteFileMetaData(*metadata,
this->sink_.get()));
ASSERT_NO_FATAL_FAILURE(this->ReaderFromSink());
- const std::shared_ptr fileMetaDataWritten =
- reader->parquet_reader()->metadata();
- ASSERT_EQ(fileMetaData->size(), fileMetaDataWritten->size());
- ASSERT_EQ(fileMetaData->num_row_groups(),
fileMetaDataWritten->num_row_groups());
- ASSERT_EQ(fileMetaData->num_rows(), fileMetaDataWritten->num_rows());
- ASSERT_EQ(fileMetaData->num_columns(), fileMetaDataWritten->num_columns());
- ASSERT_EQ(fileMetaData->RowGroup(0)->num_rows(),
-fileMetaDataWritten->RowGroup(0)->num_rows());
- uniqueFileMetaData.release();
+ auto metadata_written = reader->parquet_reader()->metadata();
+ ASSERT_EQ(metadata->size(), metadata_written->size());
+ ASSERT_EQ(metadata->num_row_groups(), metadata_written->num_row_groups());
+ ASSERT_EQ(metadata->num_rows(), metadata_written->num_rows());
+ ASSERT_EQ(metadata->num_columns(), metadata_written->num_columns());
+ ASSERT_EQ(metadata->RowGroup(0)->num_rows(),
metadata_written->RowGroup(0)->num_rows());
}
using TestInt96ParquetIO = TestParquetIO<::arrow::TimestampType>;
@@ -1485,13 +1480,13 @@ TEST(TestArrowReadWrite, ConvertedDateTimeTypes) {
// Regression for ARROW-2802
TEST(TestArrowReadWrite, CoerceTimestampsAndSupportDeprecatedInt96) {
using ::arrow::Column;
+ using ::arrow::default_memory_pool;
using ::arrow::Field;
using ::arrow::Schema;
using ::arrow::Table;
- using ::arrow::TimeUnit;
using ::arrow::TimestampBuilder;
using ::arrow::TimestampType;
- using ::arrow::default_memory_pool;
+ using ::arrow::TimeUnit;
auto timestamp_type = std::make_shared(TimeUnit::NANO);
diff --git a/src/parquet/arrow/arrow-schema-test.cc
b/src/parquet/arrow/arrow-schema-test.cc
index 5c16c044..cb2b8508 100644
--- a/src/parquet/arrow/arrow-schema-test.cc
+++ b/src/parquet/arrow/arrow-schema-test.cc
@@ -62,8 +62,8 @@ class TestConvertParquetSchema : public ::testing::Test {
for (int i = 0; i < expected_schema->num_fields(); ++i) {
auto lhs = result_schema_->field(i);
auto rhs = expected_schema->field(i);
- EXPECT_TRUE(lhs->Equals(rhs)) << i << " " << lhs->ToString()
-<< " != " << rhs->ToString();
+