kszucs commented on code in PR #45360:
URL: https://github.com/apache/arrow/pull/45360#discussion_r1985543223


##########
cpp/src/parquet/chunker_internal_test.cc:
##########
@@ -0,0 +1,908 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/table.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/float16.h"
+#include "parquet/arrow/reader.h"
+#include "parquet/arrow/reader_internal.h"
+#include "parquet/arrow/test_util.h"
+#include "parquet/arrow/writer.h"
+#include "parquet/column_writer.h"
+#include "parquet/file_writer.h"
+
+namespace parquet {
+
+using ::arrow::Array;
+using ::arrow::ChunkedArray;
+using ::arrow::ConcatenateTables;
+using ::arrow::DataType;
+using ::arrow::default_memory_pool;
+using ::arrow::Field;
+using ::arrow::Result;
+using ::arrow::Table;
+using ::arrow::io::BufferReader;
+using ::parquet::arrow::FileReader;
+using ::parquet::arrow::FileReaderBuilder;
+using ::parquet::arrow::MakeSimpleTable;
+using ::parquet::arrow::NonNullArray;
+using ::parquet::arrow::WriteTable;
+
+using ::testing::Bool;
+using ::testing::Combine;
+using ::testing::Values;
+
+// Generate deterministic and platform-independent data.
+inline uint64_t hash(uint64_t seed, uint64_t index) {
+  uint64_t h = (index + seed) * 0xc4ceb9fe1a85ec53ull;
+  h ^= h >> 33;
+  h *= 0xff51afd7ed558ccdull;
+  h ^= h >> 33;
+  h *= 0xc4ceb9fe1a85ec53ull;
+  h ^= h >> 33;
+  return h;
+}
+
+#define GENERATE_CASE_BODY(BUILDER_TYPE, VALUE_EXPR)   \
+  {                                                    \
+    BUILDER_TYPE builder(type, default_memory_pool()); \
+    if (nullable) {                                    \
+      for (int64_t i = 0; i < length; ++i) {           \
+        uint64_t val = hash(seed, i);                  \
+        if (val % 10 == 0) {                           \
+          RETURN_NOT_OK(builder.AppendNull());         \
+        } else {                                       \
+          RETURN_NOT_OK(builder.Append(VALUE_EXPR));   \
+        }                                              \
+      }                                                \
+    } else {                                           \
+      for (int64_t i = 0; i < length; ++i) {           \
+        uint64_t val = hash(seed, i);                  \
+        RETURN_NOT_OK(builder.Append(VALUE_EXPR));     \
+      }                                                \
+    }                                                  \
+    std::shared_ptr<Array> array;                      \
+    RETURN_NOT_OK(builder.Finish(&array));             \
+    RETURN_NOT_OK(array->ValidateFull());              \
+    return array;                                      \
+  }
+
+// Macro to generate a case for a given scalar type.
+#define GENERATE_CASE(TYPE_ID, BUILDER_TYPE, VALUE_EXPR) \
+  case ::arrow::Type::TYPE_ID: {                         \
+    GENERATE_CASE_BODY(BUILDER_TYPE, VALUE_EXPR)         \
+  }
+
+Result<std::shared_ptr<Array>> GenerateArray(const std::shared_ptr<Field>& 
field,
+                                             int64_t length, uint64_t seed) {
+  const std::shared_ptr<DataType>& type = field->type();
+  bool nullable = field->nullable();
+
+  switch (type->id()) {
+    GENERATE_CASE(BOOL, ::arrow::BooleanBuilder, (val % 2 == 0))
+
+    // Numeric types.
+    GENERATE_CASE(INT8, ::arrow::Int8Builder, static_cast<int8_t>(val))
+    GENERATE_CASE(INT16, ::arrow::Int16Builder, static_cast<int16_t>(val))
+    GENERATE_CASE(INT32, ::arrow::Int32Builder, static_cast<int32_t>(val))
+    GENERATE_CASE(INT64, ::arrow::Int64Builder, static_cast<int64_t>(val))
+    GENERATE_CASE(UINT8, ::arrow::UInt8Builder, static_cast<uint8_t>(val))
+    GENERATE_CASE(UINT16, ::arrow::UInt16Builder, static_cast<uint16_t>(val))
+    GENERATE_CASE(UINT32, ::arrow::UInt32Builder, static_cast<uint32_t>(val))
+    GENERATE_CASE(UINT64, ::arrow::UInt64Builder, static_cast<uint64_t>(val))
+    GENERATE_CASE(HALF_FLOAT, ::arrow::HalfFloatBuilder,
+                  static_cast<uint16_t>(val % 1000))
+    GENERATE_CASE(FLOAT, ::arrow::FloatBuilder, static_cast<float>(val % 1000) 
/ 1000.0f)
+    GENERATE_CASE(DOUBLE, ::arrow::DoubleBuilder,
+                  static_cast<double>(val % 100000) / 1000.0)
+    case ::arrow::Type::DECIMAL128: {
+      const auto& decimal_type = static_cast<const 
::arrow::Decimal128Type&>(*type);
+      // Limit the value to fit within the specified precision
+      int32_t max_exponent = decimal_type.precision() - decimal_type.scale();
+      int64_t max_value = static_cast<int64_t>(std::pow(10, max_exponent) - 1);
+      GENERATE_CASE_BODY(::arrow::Decimal128Builder, ::arrow::Decimal128(val % 
max_value))
+    }
+    case ::arrow::Type::DECIMAL256: {
+      const auto& decimal_type = static_cast<const 
::arrow::Decimal256Type&>(*type);
+      // Limit the value to fit within the specified precision, capped at 9 to 
avoid
+      // int64_t overflow
+      int32_t max_exponent = std::min(9, decimal_type.precision() - 
decimal_type.scale());
+      int64_t max_value = static_cast<int64_t>(std::pow(10, max_exponent) - 1);
+      GENERATE_CASE_BODY(::arrow::Decimal256Builder, ::arrow::Decimal256(val % 
max_value))
+    }
+
+      // Temporal types
+      GENERATE_CASE(DATE32, ::arrow::Date32Builder, static_cast<int32_t>(val))
+      GENERATE_CASE(TIME32, ::arrow::Time32Builder,
+                    std::abs(static_cast<int32_t>(val) % 86400000))
+      GENERATE_CASE(TIME64, ::arrow::Time64Builder,
+                    std::abs(static_cast<int64_t>(val) % 86400000000))
+      GENERATE_CASE(TIMESTAMP, ::arrow::TimestampBuilder, 
static_cast<int64_t>(val))
+      GENERATE_CASE(DURATION, ::arrow::DurationBuilder, 
static_cast<int64_t>(val))
+
+      // Binary and string types.
+      GENERATE_CASE(STRING, ::arrow::StringBuilder,
+                    std::string("str_") + std::to_string(val))
+      GENERATE_CASE(LARGE_STRING, ::arrow::LargeStringBuilder,
+                    std::string("str_") + std::to_string(val))
+      GENERATE_CASE(BINARY, ::arrow::BinaryBuilder,
+                    std::string("bin_") + std::to_string(val))
+    case ::arrow::Type::FIXED_SIZE_BINARY: {
+      auto size = 
static_cast<::arrow::FixedSizeBinaryType*>(type.get())->byte_width();
+      GENERATE_CASE_BODY(::arrow::FixedSizeBinaryBuilder,
+                         std::string("bin_") + std::to_string(val).substr(0, 
size - 4))
+    }
+
+    case ::arrow::Type::STRUCT: {
+      auto struct_type = static_cast<::arrow::StructType*>(type.get());
+      std::vector<std::shared_ptr<Array>> child_arrays;
+      for (auto i = 0; i < struct_type->num_fields(); i++) {
+        ARROW_ASSIGN_OR_RAISE(auto child_array,
+                              GenerateArray(struct_type->field(i), length,
+                                            seed + static_cast<uint64_t>(i + 
300)));
+        child_arrays.push_back(child_array);
+      }
+      auto struct_array =
+          std::make_shared<::arrow::StructArray>(type, length, child_arrays);
+      return struct_array;
+    }
+
+    case ::arrow::Type::LIST: {
+      auto list_type = static_cast<::arrow::ListType*>(type.get());
+      auto value_field = ::arrow::field("item", list_type->value_type());
+      ARROW_ASSIGN_OR_RAISE(auto values_array, GenerateArray(value_field, 
length, seed));
+      auto offset_builder = ::arrow::Int32Builder();
+      auto bitmap_builder = ::arrow::TypedBufferBuilder<bool>();
+
+      int32_t num_nulls = 0;
+      int32_t num_elements = 0;
+      uint8_t element_size = 0;
+      int32_t current_offset = 0;
+      RETURN_NOT_OK(offset_builder.Append(current_offset));
+      while (current_offset < length) {
+        num_elements++;
+        auto is_valid = !(nullable && (num_elements % 10 == 0));
+        if (is_valid) {
+          RETURN_NOT_OK(bitmap_builder.Append(true));
+          current_offset += element_size;
+          if (current_offset > length) {
+            RETURN_NOT_OK(offset_builder.Append(static_cast<int32_t>(length)));
+            break;
+          } else {
+            RETURN_NOT_OK(offset_builder.Append(current_offset));
+          }
+        } else {
+          
RETURN_NOT_OK(offset_builder.Append(static_cast<int32_t>(current_offset)));
+          RETURN_NOT_OK(bitmap_builder.Append(false));
+          num_nulls++;
+        }
+
+        if (element_size > 4) {
+          element_size = 0;
+        } else {
+          element_size++;
+        }
+      }
+
+      std::shared_ptr<Array> offsets_array;
+      RETURN_NOT_OK(offset_builder.Finish(&offsets_array));
+      std::shared_ptr<Buffer> bitmap_buffer;
+      RETURN_NOT_OK(bitmap_builder.Finish(&bitmap_buffer));
+      ARROW_ASSIGN_OR_RAISE(
+          auto list_array, ::arrow::ListArray::FromArrays(
+                               type, *offsets_array, *values_array, 
default_memory_pool(),
+                               bitmap_buffer, num_nulls));
+      RETURN_NOT_OK(list_array->ValidateFull());
+      return list_array;
+    }
+
+    default:
+      return ::arrow::Status::NotImplemented("Unsupported data type " + 
type->ToString());
+  }
+}
+
+Result<std::shared_ptr<Table>> GenerateTable(
+    const std::shared_ptr<::arrow::Schema>& schema, int64_t size, uint64_t 
seed = 0) {
+  std::vector<std::shared_ptr<Array>> arrays;
+  for (const auto& field : schema->fields()) {
+    ARROW_ASSIGN_OR_RAISE(auto array, GenerateArray(field, size, seed));
+    arrays.push_back(array);
+  }
+  return Table::Make(schema, arrays, size);
+}
+
+Result<std::shared_ptr<Table>> ConcatAndCombine(
+    const std::vector<std::shared_ptr<Table>>& parts) {
+  // Concatenate and combine chunks so the table doesn't carry information 
about
+  // the modification points
+  ARROW_ASSIGN_OR_RAISE(auto table, ConcatenateTables(parts));
+  return table->CombineChunks();
+}
+
+Result<std::shared_ptr<Buffer>> WriteTableToBuffer(const 
std::shared_ptr<Table>& table,
+                                                   uint64_t min_chunk_size,
+                                                   uint64_t max_chunk_size,
+                                                   bool enable_dictionary = 
false,
+
+                                                   int64_t row_group_size = 
1024 * 1024) {
+  auto sink = CreateOutputStream();
+
+  auto builder = WriterProperties::Builder();
+  builder.enable_cdc()
+      ->cdc_size_range(min_chunk_size, max_chunk_size)
+      ->cdc_norm_factor(0);
+  if (enable_dictionary) {
+    builder.enable_dictionary();
+  } else {
+    builder.disable_dictionary();
+  }
+  auto write_props = builder.build();
+  auto arrow_props = ArrowWriterProperties::Builder().store_schema()->build();
+  RETURN_NOT_OK(WriteTable(*table, default_memory_pool(), sink, row_group_size,
+                           write_props, arrow_props));
+  return sink->Finish();
+}
+
+Result<std::shared_ptr<Table>> ReadTableFromBuffer(const 
std::shared_ptr<Buffer>& data) {
+  std::shared_ptr<Table> result;
+  FileReaderBuilder builder;
+  std::unique_ptr<FileReader> reader;
+  RETURN_NOT_OK(builder.Open(std::make_shared<BufferReader>(data)));
+  RETURN_NOT_OK(builder.memory_pool(::arrow::default_memory_pool())
+                    ->properties(default_arrow_reader_properties())
+                    ->Build(&reader));
+  RETURN_NOT_OK(reader->ReadTable(&result));
+  return result;
+}
+
+struct PageSizes {
+  std::vector<uint64_t> lengths;
+  std::vector<uint64_t> sizes;
+};
+
+PageSizes GetColumnPageSizes(const std::shared_ptr<Buffer>& data, int 
column_index = 0) {
+  // Read the parquet data out of the buffer and get the sizes and lengths of 
the
+  // data pages in given column. We assert on the sizes and lengths of the 
pages
+  // to ensure that the chunking is done correctly.
+  PageSizes result;
+
+  auto buffer_reader = std::make_shared<BufferReader>(data);
+  auto parquet_reader = ParquetFileReader::Open(std::move(buffer_reader));
+
+  auto metadata = parquet_reader->metadata();
+  for (int rg = 0; rg < metadata->num_row_groups(); rg++) {
+    auto page_reader = 
parquet_reader->RowGroup(rg)->GetColumnPageReader(column_index);
+    while (auto page = page_reader->NextPage()) {
+      if (page->type() == PageType::DATA_PAGE || page->type() == 
PageType::DATA_PAGE_V2) {
+        auto data_page = static_cast<DataPage*>(page.get());
+        result.sizes.push_back(data_page->size());
+        result.lengths.push_back(data_page->num_values());
+      }
+    }
+  }
+
+  return result;
+}
+
+Result<PageSizes> WriteAndGetPageSizes(const std::shared_ptr<Table>& table,
+                                       uint64_t min_chunk_size, uint64_t 
max_chunk_size,
+                                       bool enable_dictionary = false,
+                                       int column_index = 0) {
+  // Write the table to a buffer and read it back to get the page sizes
+  ARROW_ASSIGN_OR_RAISE(
+      auto buffer,
+      WriteTableToBuffer(table, min_chunk_size, max_chunk_size, 
enable_dictionary));
+  ARROW_ASSIGN_OR_RAISE(auto readback, ReadTableFromBuffer(buffer));
+
+  RETURN_NOT_OK(readback->ValidateFull());
+  if (readback->schema()->Equals(*table->schema())) {
+    ARROW_RETURN_IF(!readback->Equals(*table),
+                    Status::Invalid("Readback table not equal to original"));
+  }
+  return GetColumnPageSizes(buffer, column_index);
+}
+
+void AssertAllBetween(const std::vector<uint64_t>& values, uint64_t min, 
uint64_t max,
+                      bool expect_dictionary_fallback = false) {
+  // Check all chunks except the last one, since it is not guaranteed to be within the range
+  if (expect_dictionary_fallback) {
+    // if dictionary encoding is enabled, the writer can fallback to plain
+    // encoding splitting within a content defined chunk, so we can't
+    // guarantee that all chunks are within the range in this case, but we
+    // know that there can be at most 2 pages smaller than the min_chunk_size
+    size_t smaller_count = 0;
+    for (size_t i = 0; i < values.size() - 1; i++) {
+      if (values[i] < min) {
+        smaller_count++;
+      } else {
+        ASSERT_LE(values[i], max);
+      }
+    }
+    ASSERT_LE(smaller_count, 2);
+  } else {
+    for (size_t i = 0; i < values.size() - 1; i++) {
+      ASSERT_GE(values[i], min);
+      ASSERT_LE(values[i], max);
+    }
+  }
+  ASSERT_LE(values.back(), max);
+}
+
+std::vector<std::pair<std::vector<uint64_t>, std::vector<uint64_t>>> 
FindDifferences(
+    const std::vector<uint64_t>& first, const std::vector<uint64_t>& second) {
+  // Compute LCS table.
+  size_t n = first.size(), m = second.size();
+  std::vector<std::vector<size_t>> dp(n + 1, std::vector<size_t>(m + 1, 0));
+  for (size_t i = 0; i < n; i++) {
+    for (size_t j = 0; j < m; j++) {
+      if (first[i] == second[j]) {
+        dp[i + 1][j + 1] = dp[i][j] + 1;
+      } else {
+        dp[i + 1][j + 1] = std::max(dp[i + 1][j], dp[i][j + 1]);
+      }
+    }
+  }
+
+  // Backtrack to get common indices.
+  std::vector<std::pair<size_t, size_t>> common;

Review Comment:
   Extended the comment.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to