This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 55d711d78 KUDU-2671 add tests to check for number of hash buckets
55d711d78 is described below

commit 55d711d78a21c76718b06dede79de2e9b9d5e8e8
Author: Alexey Serbin <[email protected]>
AuthorDate: Fri Jul 15 11:43:00 2022 -0700

    KUDU-2671 add tests to check for number of hash buckets
    
    This patch adds a few test scenarios to cover the handling of invalid
    number of hash buckets for range-specific hash schemas in AlterTable
    RPCs.
    
    Change-Id: I6bdd728b5dea7fa864648e167a1a76b07c706e8f
    Reviewed-on: http://gerrit.cloudera.org:8080/18739
    Tested-by: Alexey Serbin <[email protected]>
    Reviewed-by: Alexey Serbin <[email protected]>
---
 .../java/org/apache/kudu/client/TestKuduTable.java | 49 +++++++++++
 src/kudu/client/flex_partitioning_client-test.cc   | 47 +++++++++++
 src/kudu/master/master-test.cc                     | 95 ++++++++++++++++++++++
 3 files changed, 191 insertions(+)

diff --git a/java/kudu-client/src/test/java/org/apache/kudu/client/TestKuduTable.java b/java/kudu-client/src/test/java/org/apache/kudu/client/TestKuduTable.java
index 80101d806..28c4ac66c 100644
--- a/java/kudu-client/src/test/java/org/apache/kudu/client/TestKuduTable.java
+++ b/java/kudu-client/src/test/java/org/apache/kudu/client/TestKuduTable.java
@@ -1700,6 +1700,55 @@ public class TestKuduTable {
     }
   }
 
+  @Test(timeout = 100000)
+  public void testAlterTableAddRangeCustomHashSchemaWrongBucketsNumber() throws Exception {
+    final List<ColumnSchema> columns = ImmutableList.of(
+        new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build(),
+        new ColumnSchema.ColumnSchemaBuilder("value", Type.STRING).build());
+    final Schema schema = new Schema(columns);
+
+    CreateTableOptions options = getBasicCreateTableOptions();
+    // Add table-wide schema for the table.
+    options.addHashPartitions(ImmutableList.of("key"), 2, 0);
+    // Add a range partition with table-wide hash schema.
+    {
+      PartialRow lower = schema.newPartialRow();
+      lower.addInt(0, -100);
+      PartialRow upper = schema.newPartialRow();
+      upper.addInt(0, 0);
+      options.addRangePartition(lower, upper);
+    }
+
+    client.createTable(tableName, schema, options);
+
+    PartialRow lower = schema.newPartialRow();
+    lower.addInt(0, 0);
+    PartialRow upper = schema.newPartialRow();
+    upper.addInt(0, 100);
+
+    // Try to add range with a single hash bucket -- it should not be possible.
+    for (int hashBucketNum = -1; hashBucketNum < 2; ++hashBucketNum) {
+      try {
+        RangePartitionWithCustomHashSchema range =
+            new RangePartitionWithCustomHashSchema(
+                lower,
+                upper,
+                RangePartitionBound.INCLUSIVE_BOUND,
+                RangePartitionBound.EXCLUSIVE_BOUND);
+        range.addHashPartitions(ImmutableList.of("key"), hashBucketNum, 0);
+
+        client.alterTable(tableName, new AlterTableOptions().addRangePartition(range));
+        fail(String.format("should not be able to add a partition with " +
+            "invalid range-specific hash schema of %d hash buckets", hashBucketNum));
+      } catch (KuduException ex) {
+        final String errmsg = ex.getMessage();
+        assertTrue(errmsg, ex.getStatus().isInvalidArgument());
+        assertTrue(String.format("%d hash buckets: %s", hashBucketNum, errmsg),
+            errmsg.matches("must have at least two hash buckets"));
+      }
+    }
+  }
+
   @Test(timeout = 100000)
   @KuduTestHarness.MasterServerConfig(flags = {
       "--enable_per_range_hash_schemas=false",
diff --git a/src/kudu/client/flex_partitioning_client-test.cc b/src/kudu/client/flex_partitioning_client-test.cc
index 77e67d990..1d3909940 100644
--- a/src/kudu/client/flex_partitioning_client-test.cc
+++ b/src/kudu/client/flex_partitioning_client-test.cc
@@ -2036,5 +2036,52 @@ TEST_F(FlexPartitioningScanTest, MaxKeyValue) {
   }
 }
 
+// Try adding range partition with custom hash schema where the number of
+// hash buckets is invalid.
+TEST_F(FlexPartitioningAlterTableTest, AddRangeWithWrongHashBucketsNumber) {
+  constexpr const char* const kCol0 = "c0";
+  constexpr const char* const kCol1 = "c1";
+  constexpr const char* const kErrMsg =
+      "at least two buckets are required to establish hash partitioning";
+  constexpr const char* const kTableName = "AddRangeWithWrongHashBucketsNumber";
+
+  KuduSchema schema;
+  {
+    KuduSchemaBuilder b;
+    b.AddColumn(kCol0)->Type(KuduColumnSchema::INT32)->NotNull()->PrimaryKey();
+    b.AddColumn(kCol1)->Type(KuduColumnSchema::STRING)->Nullable();
+    ASSERT_OK(b.Build(&schema));
+  }
+
+  unique_ptr<KuduTableCreator> table_creator(client_->NewTableCreator());
+  table_creator->table_name(kTableName)
+      .schema(&schema)
+      .num_replicas(1)
+      .add_hash_partitions({ kCol0 }, 2)
+      .set_range_partition_columns({ kCol0 });
+
+  // Add a range partition with the table-wide hash schema.
+  {
+    unique_ptr<KuduPartialRow> lower(schema.NewRow());
+    ASSERT_OK(lower->SetInt32(kCol0, -100));
+    unique_ptr<KuduPartialRow> upper(schema.NewRow());
+    ASSERT_OK(upper->SetInt32(kCol0, 0));
+    table_creator->add_range_partition(lower.release(), upper.release());
+  }
+  ASSERT_OK(table_creator->Create());
+
+  // Try to add hash partitions with wrong number of buckets in the
+  // range-specific hash schema. In Kudu C++ client, such mistakes are caught
+  // at the client side.
+  for (auto hash_bucket_num = -1; hash_bucket_num < 2; ++hash_bucket_num) {
+    SCOPED_TRACE(Substitute("hash schema with $0 buckets", hash_bucket_num));
+    unique_ptr<KuduTableAlterer> alterer(client_->NewTableAlterer(kTableName));
+    auto p = CreateRangePartition(schema, kCol0, 0, 100);
+    const auto s = p->add_hash_partitions({ kCol0 }, hash_bucket_num, 0);
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), kErrMsg);
+  }
+}
+
 } // namespace client
 } // namespace kudu
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 7f2a61d83..3a5002be0 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -1275,6 +1275,101 @@ TEST_F(MasterTest, AlterTableAddAndDropRangeWithSpecificHashSchema) {
   }
 }
 
+TEST_F(MasterTest, AlterTableAddRangeWithSpecificHashSchemaWrongBucketNumber) {
+  constexpr const char* const kTableName = "wrong_bucket_number_in_hash_schema";
+  constexpr const char* const kCol0 = "c_int32";
+  constexpr const char* const kCol1 = "c_int64";
+  const Schema kTableSchema({ColumnSchema(kCol0, INT32),
+                             ColumnSchema(kCol1, INT64)}, 1);
+  FLAGS_default_num_replicas = 1;
+
+  // Create a table with one range partition based on the table-wide hash schema.
+  CreateTableResponsePB create_table_resp;
+  {
+    KuduPartialRow lower(&kTableSchema);
+    ASSERT_OK(lower.SetInt32(kCol0, 0));
+    KuduPartialRow upper(&kTableSchema);
+    ASSERT_OK(upper.SetInt32(kCol0, 100));
+    ASSERT_OK(CreateTable(
+        kTableName, kTableSchema, nullopt, nullopt, nullopt, {}, {{lower, upper}},
+        {}, {{{kCol0}, 2, 0}}, &create_table_resp));
+  }
+
+  // Check the number of tablets in the table before ALTER TABLE.
+  {
+    CatalogManager::ScopedLeaderSharedLock l(master_->catalog_manager());
+    std::vector<scoped_refptr<TableInfo>> tables;
+    master_->catalog_manager()->GetAllTables(&tables);
+    ASSERT_EQ(1, tables.size());
+    ASSERT_EQ(2, tables.front()->num_tablets());
+  }
+
+  const auto& table_id = create_table_resp.table_id();
+
+  // Try altering the table, adding a new range with custom hash schema where
+  // the number of hash buckets is incorrect.
+  for (auto bucket_num = -1; bucket_num < 2; ++bucket_num) {
+    SCOPED_TRACE(Substitute("number of hash buckets: $0", bucket_num));
+    const HashSchema custom_hash_schema{{{kCol0}, bucket_num, 222}};
+
+    AlterTableRequestPB req;
+    AlterTableResponsePB resp;
+    req.mutable_table()->set_table_name(kTableName);
+    req.mutable_table()->set_table_id(table_id);
+
+    // Add the required information on the table's schema:
+    // key and non-null columns must be present in the request.
+    {
+      auto* col = req.mutable_schema()->add_columns();
+      col->set_name(kCol0);
+      col->set_type(INT32);
+      col->set_is_key(true);
+    }
+    {
+      auto* col = req.mutable_schema()->add_columns();
+      col->set_name(kCol1);
+      col->set_type(INT64);
+    }
+
+    AlterTableRequestPB::Step* step = req.add_alter_schema_steps();
+    step->set_type(AlterTableRequestPB::ADD_RANGE_PARTITION);
+    KuduPartialRow lower(&kTableSchema);
+    ASSERT_OK(lower.SetInt32(kCol0, 100));
+    KuduPartialRow upper(&kTableSchema);
+    ASSERT_OK(upper.SetInt32(kCol0, 200));
+    RowOperationsPBEncoder enc(
+        step->mutable_add_range_partition()->mutable_range_bounds());
+    enc.Add(RowOperationsPB::RANGE_LOWER_BOUND, lower);
+    enc.Add(RowOperationsPB::RANGE_UPPER_BOUND, upper);
+    for (const auto& hash_dimension: custom_hash_schema) {
+      auto* hash_dimension_pb = step->mutable_add_range_partition()->
+          mutable_custom_hash_schema()->add_hash_schema();
+      for (const auto& col_name: hash_dimension.columns) {
+        hash_dimension_pb->add_columns()->set_name(col_name);
+      }
+      hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
+      hash_dimension_pb->set_seed(hash_dimension.seed);
+    }
+
+    RpcController ctl;
+    ASSERT_OK(proxy_->AlterTable(req, &resp, &ctl));
+    ASSERT_TRUE(resp.has_error());
+    const auto s = StatusFromPB(resp.error().status());
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "must have at least two hash buckets");
+  }
+
+  // One more sanity check: the number of tablets in the table after
+  // attempted, but failed ALTER TABLE should stay the same as before.
+  {
+    CatalogManager::ScopedLeaderSharedLock l(master_->catalog_manager());
+    std::vector<scoped_refptr<TableInfo>> tables;
+    master_->catalog_manager()->GetAllTables(&tables);
+    ASSERT_EQ(1, tables.size());
+    ASSERT_EQ(2, tables.front()->num_tablets());
+  }
+}
+
 // This scenario verifies that when the support for range-specific hash schemas
 // is enabled, adding and dropping range partitions with table-wide hash schemas
 // works as expected.

Reply via email to