tolleybot commented on code in PR #34616: URL: https://github.com/apache/arrow/pull/34616#discussion_r1270936312
########## cpp/src/arrow/dataset/dataset_encryption_test.cc: ##########
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <string_view>

#include "gtest/gtest.h"

#include "arrow/api.h"
#include "arrow/array/builder_primitive.h"
#include "arrow/builder.h"
#include "arrow/dataset/api.h"
#include "arrow/dataset/parquet_encryption_config.h"
#include "arrow/dataset/partition.h"
#include "arrow/filesystem/mockfs.h"
#include "arrow/io/api.h"
#include "arrow/status.h"
#include "arrow/table.h"
#include "arrow/testing/gtest_util.h"
#include "parquet/arrow/reader.h"
#include "parquet/encryption/test_in_memory_kms.h"

// Test-only key material fed to the in-memory KMS; 16-byte (128-bit) master keys.
constexpr std::string_view kFooterKeyMasterKey = "0123456789012345";
constexpr std::string_view kFooterKeyMasterKeyId = "footer_key";
constexpr std::string_view kColumnMasterKeys[] = {"1234567890123450"};
constexpr std::string_view kColumnMasterKeysIds[] = {"col_key"};
constexpr std::string_view kBaseDir = "";
// Number of entries in kColumnMasterKeys / kColumnMasterKeysIds above.
const int kNumColumns = 1;

using arrow::internal::checked_pointer_cast;

namespace arrow {
namespace dataset {

class DatasetEncryptionTest : public ::testing::Test {
 protected:
  // This function creates a mock file system using the current time point, creates a
  // directory with the given base directory path, and writes a dataset to it using
  // provided Parquet file write options. The dataset is partitioned using a Hive
  // partitioning scheme. The function also checks if the written files exist in the file
  // system.
  //
  // Returns the mock file system holding the written dataset, or an error Status from
  // any of the underlying filesystem/dataset operations.
  ::arrow::Result<std::shared_ptr<::arrow::fs::FileSystem>>
  CreateMockFileSystemAndWriteData(
      const std::string& base_dir,
      const std::shared_ptr<FileWriteOptions>& parquet_file_write_options) {
    // Create our mock file system, stamped with the current wall-clock time
    ::arrow::fs::TimePoint mock_now = std::chrono::system_clock::now();
    ARROW_ASSIGN_OR_RAISE(auto file_system,
                          ::arrow::fs::internal::MockFileSystem::Make(mock_now, {}));
    // Create the base directory inside the mock filesystem
    RETURN_NOT_OK(file_system->CreateDir(base_dir));

    // Hive-style partitioning on the string column "part" (one directory per value)
    auto partition_schema = ::arrow::schema({::arrow::field("part", ::arrow::utf8())});
    auto partitioning =
        std::make_shared<::arrow::dataset::HivePartitioning>(partition_schema);

    // ----- Write the Dataset ----
    auto dataset_out = BuildTable();
    ARROW_ASSIGN_OR_RAISE(auto scanner_builder_out, dataset_out->NewScan());
    ARROW_ASSIGN_OR_RAISE(auto scanner_out, scanner_builder_out->Finish());

    ::arrow::dataset::FileSystemDatasetWriteOptions write_options;
    write_options.file_write_options = parquet_file_write_options;
    write_options.filesystem = file_system;
    write_options.base_dir = base_dir;
    write_options.partitioning = partitioning;
    write_options.basename_template = "part{i}.parquet";
    RETURN_NOT_OK(::arrow::dataset::FileSystemDataset::Write(write_options, scanner_out));

    // BuildTable() emits ten distinct "part" values ("a".."j"), so Hive partitioning
    // should have produced exactly one file per partition directory.
    std::vector<std::string> files = {"part=a/part0.parquet", "part=b/part0.parquet",
                                      "part=c/part0.parquet", "part=d/part0.parquet",
                                      "part=e/part0.parquet", "part=f/part0.parquet",
                                      "part=g/part0.parquet", "part=h/part0.parquet",
                                      "part=i/part0.parquet", "part=j/part0.parquet"};
    ValidateFilesExist(file_system, files);

    return file_system;
  }

  // Create dataset encryption properties
  // Builds a matched pair of (encryption config, decryption config) that share one
  // CryptoFactory (backed by the test-only in-memory KMS) and one KmsConnectionConfig,
  // so data written with the first member can be read back with the second.
  //
  // column_ids / column_keys: parallel arrays of num_columns master-key ids and keys.
  // footer_id / footer_key: master-key id and key used for the Parquet footer.
  // footer_key_name: footer key id handed to the EncryptionConfiguration.
  // column_key_mapping: Parquet "key_id: column" mapping string, e.g. "col_key: a".
  std::pair<std::shared_ptr<ParquetEncryptionConfig>,
            std::shared_ptr<ParquetDecryptionConfig>>
  CreateParquetEncryptionConfig(const std::string_view* column_ids,
                                const std::string_view* column_keys, int num_columns,
                                std::string_view footer_id, std::string_view footer_key,
                                std::string_view footer_key_name = "footer_key",
                                std::string_view column_key_mapping = "col_key: a") {
    auto key_list =
        BuildKeyMap(column_ids, column_keys, num_columns, footer_id, footer_key);

    auto crypto_factory = SetupCryptoFactory(/*wrap_locally=*/true, key_list);

    auto encryption_config =
        std::make_shared<::parquet::encryption::EncryptionConfiguration>(
            std::string(footer_key_name));
    encryption_config->column_keys = column_key_mapping;
    // NOTE(review): the constructor above was already given footer_key_name; this
    // conditional reassignment looks redundant — confirm it is intentional.
    if (footer_key_name.size() > 0) {
      encryption_config->footer_key = footer_key_name;
    }

    auto kms_connection_config =
        std::make_shared<parquet::encryption::KmsConnectionConfig>();

    // Wire the shared crypto factory and KMS connection into both the write-side
    // and read-side configurations.
    ParquetEncryptionConfig parquet_encryption_config;
    parquet_encryption_config.Setup(crypto_factory, kms_connection_config,
                                    encryption_config);
    auto decryption_config =
        std::make_shared<parquet::encryption::DecryptionConfiguration>();
    ParquetDecryptionConfig parquet_decryption_config;
    parquet_decryption_config.Setup(crypto_factory, kms_connection_config,
                                    decryption_config);
    return std::make_pair(
        std::make_shared<ParquetEncryptionConfig>(parquet_encryption_config),
        std::make_shared<ParquetDecryptionConfig>(parquet_decryption_config));
  }

  // Utility to build the key map: key-id -> master-key entries for the num_columns
  // column keys plus the footer key, as consumed by the in-memory KMS.
  std::unordered_map<std::string, std::string> BuildKeyMap(
      const std::string_view* column_ids, const std::string_view* column_keys,
      int num_columns, const std::string_view& footer_id,
      const std::string_view& footer_key) {
    std::unordered_map<std::string, std::string> key_map;
    // Add column keys
    for (int i = 0; i < num_columns; i++) {
      key_map.insert({std::string(column_ids[i]), std::string(column_keys[i])});
    }
    // Add footer key
    key_map.insert({std::string(footer_id), std::string(footer_key)});

    return key_map;
  }

  // A utility function to validate our files were written out: asserts that each
  // given path exists in the filesystem and is a regular file.
  void ValidateFilesExist(const std::shared_ptr<arrow::fs::FileSystem>& fs,
                          const std::vector<std::string>& files) {
    for (const auto& file_path : files) {
      ASSERT_OK_AND_ASSIGN(auto result, fs->GetFileInfo(file_path));

      ASSERT_EQ(result.type(), arrow::fs::FileType::File);
    }
  }

  // Build a dummy table: ten rows with three int64 columns (a, b, c) and a utf8
  // partition column "part" holding the distinct values "a".."j", wrapped in an
  // InMemoryDataset for writing.
  std::shared_ptr<::arrow::dataset::InMemoryDataset> BuildTable() {
    // Create an Arrow Table
    auto schema = arrow::schema(
        {arrow::field("a", arrow::int64()), arrow::field("b", arrow::int64()),
         arrow::field("c", arrow::int64()), arrow::field("part", arrow::utf8())});

    std::vector<std::shared_ptr<arrow::Array>> arrays(4);
    arrow::NumericBuilder<arrow::Int64Type> builder;
    ARROW_EXPECT_OK(builder.AppendValues({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
    ARROW_EXPECT_OK(builder.Finish(&arrays[0]));
    builder.Reset();
    ARROW_EXPECT_OK(builder.AppendValues({9, 8, 7, 6, 5, 4, 3, 2, 1, 0}));
    ARROW_EXPECT_OK(builder.Finish(&arrays[1]));
    builder.Reset();
    ARROW_EXPECT_OK(builder.AppendValues({1, 2, 1, 2, 1, 2, 1, 2, 1, 2}));
    ARROW_EXPECT_OK(builder.Finish(&arrays[2]));
    arrow::StringBuilder string_builder;
    ARROW_EXPECT_OK(
        string_builder.AppendValues({"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}));
    ARROW_EXPECT_OK(string_builder.Finish(&arrays[3]));

    auto table = arrow::Table::Make(schema, arrays);

    // Write it using Datasets
    return std::make_shared<::arrow::dataset::InMemoryDataset>(table);
  }

  // Helper function to create a crypto factory with the test-only in-memory KMS
  // client factory registered. wrap_locally is forwarded to the KMS client factory;
  // key_list supplies the key-id -> master-key material.
  std::shared_ptr<::parquet::encryption::CryptoFactory> SetupCryptoFactory(
      bool wrap_locally, const std::unordered_map<std::string, std::string>& key_list) {
    auto crypto_factory = std::make_shared<::parquet::encryption::CryptoFactory>();

    auto kms_client_factory =
        std::make_shared<::parquet::encryption::TestOnlyInMemoryKmsClientFactory>(
            wrap_locally, key_list);

    crypto_factory->RegisterKmsClientFactory(kms_client_factory);

    return crypto_factory;
  }
};
// Write dataset to disk with encryption
// The aim of this test is to demonstrate the process of writing a partitioned
// Parquet file while applying distinct file encryption properties to each
// file within the test. This is based on the selected columns.

Review Comment: Yes, feel free to edit as you see fit. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
