This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 084640a  [partition] RangeHashSchema --> PerRangeHashBucketSchemas
084640a is described below

commit 084640a4027a7bc09a3975d0d9d528d64c68f512
Author: Alexey Serbin <[email protected]>
AuthorDate: Fri Jul 23 10:54:39 2021 -0700

    [partition] RangeHashSchema --> PerRangeHashBucketSchemas
    
    This patch simply renames the PartitionSchema::RangeHashSchema type
    to PartitionSchema::PerRangeHashBucketSchemas, updating the existing
    typedef for std::vector<HashBucketSchemas>.  I found it's easier to read
    and comprehend the code with this change because the PB-based
    PartitionSchemaPB::PerRangeHashBucketSchemasPB structure is very close
    to what PartitionSchema::PerRangeHashBucketSchemas is.
    
    In addition, this patch introduces a RangesWithHashSchemas typedef for
    vector<RangeWithHashSchemas> and updates the signature of the
    PartitionSchema::SplitRangeBounds() method.
    
    I also updated the code in master-test and table_locations-itest to
    introduce similar typedefs.  I found some more room for refactoring
    in those two test suites (e.g., there are similar data structures
    in both), but I decided not to address that right away to avoid
    bloating this patch.
    
    This patch does not contain any functional changes.
    
    Change-Id: Ic64a956a87c3d66cb2a14f4a3da7f57948518f17
    Reviewed-on: http://gerrit.cloudera.org:8080/17724
    Tested-by: Alexey Serbin <[email protected]>
    Reviewed-by: Andrew Wong <[email protected]>
    Reviewed-by: Mahesh Reddy <[email protected]>
---
 src/kudu/common/partition-test.cc                  |  6 +-
 src/kudu/common/partition.cc                       | 22 ++++----
 src/kudu/common/partition.h                        | 18 +++---
 .../integration-tests/table_locations-itest.cc     | 31 ++++++-----
 src/kudu/master/catalog_manager.cc                 |  2 +-
 src/kudu/master/master-test.cc                     | 65 +++++++++++-----------
 6 files changed, 73 insertions(+), 71 deletions(-)

diff --git a/src/kudu/common/partition-test.cc 
b/src/kudu/common/partition-test.cc
index 299a33f..e405b55 100644
--- a/src/kudu/common/partition-test.cc
+++ b/src/kudu/common/partition-test.cc
@@ -904,7 +904,7 @@ TEST_F(PartitionTest, TestVaryingHashSchemasPerRange) {
             partition_schema.DebugString(schema));
 
   vector<pair<KuduPartialRow, KuduPartialRow>> bounds;
-  PartitionSchema::RangeHashSchema range_hash_schemas;
+  PartitionSchema::PerRangeHashBucketSchemas range_hash_schemas;
   vector<pair<pair<KuduPartialRow, KuduPartialRow>,
       PartitionSchema::HashBucketSchemas>> bounds_with_hash_schemas;
 
@@ -1189,7 +1189,7 @@ TEST_F(PartitionTest, CustomHashSchemasPerRangeOnly) {
 
   typedef pair<KuduPartialRow, KuduPartialRow> RangeBound;
   vector<RangeBound> bounds;
-  PartitionSchema::RangeHashSchema range_hash_schemas;
+  PartitionSchema::PerRangeHashBucketSchemas range_hash_schemas;
   vector<pair<RangeBound, PartitionSchema::HashBucketSchemas>>
       bounds_with_hash_schemas;
 
@@ -1252,7 +1252,7 @@ TEST_F(PartitionTest, 
TestVaryingHashSchemasPerUnboundedRanges) {
             partition_schema.DebugString(schema));
 
   vector<pair<KuduPartialRow, KuduPartialRow>> bounds;
-  PartitionSchema::RangeHashSchema range_hash_schemas;
+  PartitionSchema::PerRangeHashBucketSchemas range_hash_schemas;
 
   { // [(_, _, _), (a1, _, c1))
     KuduPartialRow lower(&schema);
diff --git a/src/kudu/common/partition.cc b/src/kudu/common/partition.cc
index c46d15c..d97068a 100644
--- a/src/kudu/common/partition.cc
+++ b/src/kudu/common/partition.cc
@@ -192,7 +192,7 @@ Status PartitionSchema::FromPB(const PartitionSchemaPB& pb,
   partition_schema->Clear();
   RETURN_NOT_OK(ExtractHashBucketSchemasFromPB(schema, 
pb.hash_bucket_schemas(),
                                                
&partition_schema->hash_bucket_schemas_));
-  RangeHashSchema range_hash_schema;
+  PerRangeHashBucketSchemas range_hash_schema;
   range_hash_schema.resize(pb.range_hash_schemas_size());
   for (int i = 0; i < pb.range_hash_schemas_size(); i++) {
     RETURN_NOT_OK(ExtractHashBucketSchemasFromPB(schema, 
pb.range_hash_schemas(i).hash_schemas(),
@@ -381,9 +381,9 @@ Status PartitionSchema::EncodeRangeSplits(const 
vector<KuduPartialRow>& split_ro
 
 Status PartitionSchema::EncodeRangeBounds(
     const vector<pair<KuduPartialRow, KuduPartialRow>>& range_bounds,
-    const RangeHashSchema& range_hash_schemas,
+    const PerRangeHashBucketSchemas& range_hash_schemas,
     const Schema& schema,
-    vector<RangeWithHashSchemas>* bounds_with_hash_schemas) const {
+    RangesWithHashSchemas* bounds_with_hash_schemas) const {
   DCHECK(bounds_with_hash_schemas->empty());
   if (range_bounds.empty()) {
     bounds_with_hash_schemas->emplace_back(RangeWithHashSchemas{"", "", {}});
@@ -448,16 +448,16 @@ Status PartitionSchema::EncodeRangeBounds(
   return Status::OK();
 }
 
-Status PartitionSchema::SplitRangeBounds(const Schema& schema,
-                                         vector<string> splits,
-                                         vector<RangeWithHashSchemas>*
-                                             bounds_with_hash_schemas) const {
+Status PartitionSchema::SplitRangeBounds(
+    const Schema& schema,
+    const vector<string>& splits,
+    RangesWithHashSchemas* bounds_with_hash_schemas) const {
   if (splits.empty()) {
     return Status::OK();
   }
 
   auto expected_bounds = std::max(1UL, bounds_with_hash_schemas->size()) + 
splits.size();
-  vector<RangeWithHashSchemas> new_bounds_with_hash_schemas;
+  RangesWithHashSchemas new_bounds_with_hash_schemas;
   new_bounds_with_hash_schemas.reserve(expected_bounds);
 
   // Iterate through the sorted bounds and sorted splits, splitting the bounds
@@ -497,7 +497,7 @@ Status PartitionSchema::SplitRangeBounds(const Schema& 
schema,
 Status PartitionSchema::CreatePartitions(
     const vector<KuduPartialRow>& split_rows,
     const vector<pair<KuduPartialRow, KuduPartialRow>>& range_bounds,
-    const RangeHashSchema& range_hash_schemas,
+    const PerRangeHashBucketSchemas& range_hash_schemas,
     const Schema& schema,
     vector<Partition>* partitions) const {
   const auto& hash_encoder = GetKeyEncoder<string>(GetTypeInfo(UINT32));
@@ -531,12 +531,12 @@ Status PartitionSchema::CreatePartitions(
     }
   }
 
-  vector<RangeWithHashSchemas> bounds_with_hash_schemas;
+  RangesWithHashSchemas bounds_with_hash_schemas;
   vector<string> splits;
   RETURN_NOT_OK(EncodeRangeBounds(range_bounds, range_hash_schemas, schema,
                                   &bounds_with_hash_schemas));
   RETURN_NOT_OK(EncodeRangeSplits(split_rows, schema, &splits));
-  RETURN_NOT_OK(SplitRangeBounds(schema, std::move(splits), 
&bounds_with_hash_schemas));
+  RETURN_NOT_OK(SplitRangeBounds(schema, splits, &bounds_with_hash_schemas));
 
   // Maps each partition to its respective hash schemas within 
'bounds_with_hash_schemas',
   // needed for logic later in function for filling in holes in partition key 
space. Will be
diff --git a/src/kudu/common/partition.h b/src/kudu/common/partition.h
index 1879b93..bea781a 100644
--- a/src/kudu/common/partition.h
+++ b/src/kudu/common/partition.h
@@ -155,13 +155,14 @@ class PartitionSchema {
 
   typedef std::vector<HashBucketSchema> HashBucketSchemas;
   // Holds each bound's HashBucketSchemas.
-  typedef std::vector<HashBucketSchemas> RangeHashSchema;
+  typedef std::vector<HashBucketSchemas> PerRangeHashBucketSchemas;
 
   struct RangeWithHashSchemas {
     std::string lower;
     std::string upper;
     HashBucketSchemas hash_schemas;
   };
+  typedef std::vector<RangeWithHashSchemas> RangesWithHashSchemas;
 
   // Extracts HashBucketSchemas from a protobuf repeated field of hash buckets.
   static Status ExtractHashBucketSchemasFromPB(
@@ -200,7 +201,7 @@ class PartitionSchema {
   Status CreatePartitions(
       const std::vector<KuduPartialRow>& split_rows,
       const std::vector<std::pair<KuduPartialRow, KuduPartialRow>>& 
range_bounds,
-      const RangeHashSchema& range_hash_schemas,
+      const PerRangeHashBucketSchemas& range_hash_schemas,
       const Schema& schema,
       std::vector<Partition>* partitions) const WARN_UNUSED_RESULT;
 
@@ -305,7 +306,7 @@ class PartitionSchema {
     return hash_bucket_schemas_;
   }
 
-  const std::vector<RangeWithHashSchemas>& ranges_with_hash_schemas() const {
+  const RangesWithHashSchemas& ranges_with_hash_schemas() const {
     return ranges_with_hash_schemas_;
   }
 
@@ -444,17 +445,17 @@ class PartitionSchema {
   // it indicates that the table wide hash schema will be used per range.
   Status EncodeRangeBounds(
       const std::vector<std::pair<KuduPartialRow, KuduPartialRow>>& 
range_bounds,
-      const RangeHashSchema& range_hash_schemas,
+      const PerRangeHashBucketSchemas& range_hash_schemas,
       const Schema& schema,
-      std::vector<RangeWithHashSchemas>* bounds_with_hash_schemas) const;
+      RangesWithHashSchemas* bounds_with_hash_schemas) const;
 
   // Splits the encoded range bounds by the split points. The splits and 
bounds within
   // 'bounds_with_hash_schemas' must be sorted. If `bounds_with_hash_schemas` 
is empty,
   // then a single unbounded range is assumed. If any of the splits falls 
outside
   // of the bounds, then an InvalidArgument status is returned.
   Status SplitRangeBounds(const Schema& schema,
-                          std::vector<std::string> splits,
-                          std::vector<RangeWithHashSchemas>* 
bounds_with_hash_schemas) const;
+                          const std::vector<std::string>& splits,
+                          RangesWithHashSchemas* bounds_with_hash_schemas) 
const;
 
   // Increments a range partition key, setting 'increment' to true if the
   // increment succeeds, or false if all range partition columns are already 
the
@@ -463,8 +464,7 @@ class PartitionSchema {
 
   HashBucketSchemas hash_bucket_schemas_;
   RangeSchema range_schema_;
-
-  std::vector<RangeWithHashSchemas> ranges_with_hash_schemas_;
+  RangesWithHashSchemas ranges_with_hash_schemas_;
 };
 
 } // namespace kudu
diff --git a/src/kudu/integration-tests/table_locations-itest.cc 
b/src/kudu/integration-tests/table_locations-itest.cc
index 2b11f80..67dc370 100644
--- a/src/kudu/integration-tests/table_locations-itest.cc
+++ b/src/kudu/integration-tests/table_locations-itest.cc
@@ -159,13 +159,15 @@ class TableLocationsTest : public KuduTest {
     int32_t num_buckets;
     uint32_t seed;
   };
+  typedef vector<HashBucketSchema> HashBucketSchemas;
+  typedef vector<HashBucketSchemas> PerRangeHashBucketSchemas;
 
   Status CreateTable(const string& table_name,
                      const Schema& schema,
                      const vector<KuduPartialRow>& split_rows,
                      const vector<pair<KuduPartialRow, KuduPartialRow>>& 
bounds,
-                     const vector<vector<HashBucketSchema>>& range_hash_schema,
-                     const vector<HashBucketSchema>& table_hash_schema);
+                     const PerRangeHashBucketSchemas& range_hash_schema,
+                     const HashBucketSchemas& table_hash_schema);
 
 
   void CreateTable(const string& table_name, int num_splits);
@@ -178,14 +180,13 @@ class TableLocationsTest : public KuduTest {
   unique_ptr<MasterServiceProxy> proxy_;
 };
 
-Status TableLocationsTest::CreateTable(const string& table_name,
-                                       const Schema& schema,
-                                       const vector<KuduPartialRow>& 
split_rows = {},
-                                       const vector<pair<KuduPartialRow,
-                                                         KuduPartialRow>>& 
bounds = {},
-                                       const vector<vector<HashBucketSchema>>&
-                                           range_hash_schema = {},
-                                       const vector<HashBucketSchema>& 
table_hash_schema = {}) {
+Status TableLocationsTest::CreateTable(
+    const string& table_name,
+    const Schema& schema,
+    const vector<KuduPartialRow>& split_rows = {},
+    const vector<pair<KuduPartialRow, KuduPartialRow>>& bounds = {},
+    const PerRangeHashBucketSchemas& range_hash_schema = {},
+    const HashBucketSchemas& table_hash_schema = {}) {
 
   CreateTableRequestPB req;
   req.set_name(table_name);
@@ -476,15 +477,15 @@ TEST_F(TableLocationsTest, TestRangeSpecificHashing) {
   ASSERT_OK(bounds[2].first.SetStringNoCopy(0, "e"));
   ASSERT_OK(bounds[2].second.SetStringNoCopy(0, "f"));
 
-  vector<vector<HashBucketSchema>> range_hash_schema;
-  vector<HashBucketSchema> hash_schema_4_by_2 = { { { "key" }, 4, 0 }, { { 
"val" }, 2, 0} };
+  PerRangeHashBucketSchemas range_hash_schema;
+  HashBucketSchemas hash_schema_4_by_2 = { { { "key" }, 4, 0 }, { { "val" }, 
2, 0} };
   range_hash_schema.emplace_back(hash_schema_4_by_2);
-  vector<HashBucketSchema> hash_schema_6 = { { { "key" }, 6, 2 } };
+  HashBucketSchemas hash_schema_6 = { { { "key" }, 6, 2 } };
   range_hash_schema.emplace_back(hash_schema_6);
 
   // Table-wide hash schema, applied to range by default if no per-range 
schema is specified.
-  vector<HashBucketSchema> table_hash_schema_5 = { { { "val" }, 5, 4 } };
-  range_hash_schema.emplace_back(vector<HashBucketSchema>());
+  HashBucketSchemas table_hash_schema_5 = { { { "val" }, 5, 4 } };
+  range_hash_schema.push_back({});
 
   ASSERT_OK(CreateTable(table_name, schema, {}, bounds, range_hash_schema, 
table_hash_schema_5));
   NO_FATALS(CheckMasterTableCreation(table_name, 19));
diff --git a/src/kudu/master/catalog_manager.cc 
b/src/kudu/master/catalog_manager.cc
index 318487a..8c18e55 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -1859,7 +1859,7 @@ Status CatalogManager::CreateTable(const 
CreateTableRequestPB* orig_req,
     }
   }
 
-  PartitionSchema::RangeHashSchema range_hash_schemas;
+  PartitionSchema::PerRangeHashBucketSchemas range_hash_schemas;
   if (FLAGS_enable_per_range_hash_schemas) {
     // TODO(aserbin): the signature of CreatePartitions() require the
     //                'range_hash_schemas' parameters: update its signature
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 748a759..d314c8b 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -176,6 +176,8 @@ class MasterTest : public KuduTest {
     int32_t num_buckets;
     uint32_t seed;
   };
+  typedef vector<HashBucketSchema> HashBucketSchemas;
+  typedef vector<HashBucketSchemas> PerRangeHashBucketSchemas;
 
   void DoListTables(const ListTablesRequestPB& req, ListTablesResponsePB* 
resp);
   void DoListAllTables(ListTablesResponsePB* resp);
@@ -189,7 +191,7 @@ class MasterTest : public KuduTest {
                      const optional<string>& owner,
                      const optional<string>& comment = boost::none,
                      const optional<TableTypePB>& table_type = boost::none,
-                     const vector<vector<HashBucketSchema>>& range_hash_schema 
= {});
+                     const PerRangeHashBucketSchemas& range_hash_schema = {});
 
   shared_ptr<Messenger> client_messenger_;
   unique_ptr<MiniMaster> mini_master_;
@@ -605,7 +607,7 @@ Status MasterTest::CreateTable(const string& table_name,
                                const optional<string>& owner,
                                const optional<string>& comment,
                                const optional<TableTypePB>& table_type,
-                               const vector<vector<HashBucketSchema>>& 
range_hash_schema) {
+                               const PerRangeHashBucketSchemas& 
range_hash_schema) {
   CreateTableRequestPB req;
   req.set_name(table_name);
   if (table_type) {
@@ -858,7 +860,7 @@ TEST_F(MasterTest, ListTablesWithTableFilter) {
 }
 
 TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
-  const char *kTableName = "testtb";
+  constexpr const char* const kTableName = "testtb";
   const Schema kTableSchema({ ColumnSchema("key", INT32), ColumnSchema("val", 
INT32) }, 1);
 
   // No duplicate split rows.
@@ -878,9 +880,9 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     ASSERT_OK(split1.SetInt32("key", 1));
     KuduPartialRow split2(&kTableSchema);
     Status s = CreateTable(kTableName, kTableSchema, { split1, split2 }, {}, 
none);
-    ASSERT_TRUE(s.IsInvalidArgument());
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(),
-                        "Invalid argument: split rows must contain a value for 
at "
+                        "split rows must contain a value for at "
                         "least one range partition column");
   }
 
@@ -894,10 +896,10 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     KuduPartialRow a_upper(&kTableSchema);
     ASSERT_OK(a_lower.SetInt32("key", 0));
     ASSERT_OK(a_upper.SetInt32("key", 100));
-    vector<vector<HashBucketSchema>> range_hash_schema = { 
vector<HashBucketSchema>() };
+    PerRangeHashBucketSchemas range_hash_schemas = {{}};
     Status s = CreateTable(kTableName, kTableSchema, { split1 }, { { a_lower, 
a_upper } },
-                           none, none, none, range_hash_schema);
-    ASSERT_TRUE(s.IsInvalidArgument());
+                           none, none, none, range_hash_schemas);
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(),
                         "Both 'split_rows' and 'range_hash_schemas' cannot be "
                         "populated at the same time.");
@@ -915,9 +917,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     KuduPartialRow b_upper(&kTableSchema);
     ASSERT_OK(b_lower.SetInt32("key", 100));
     ASSERT_OK(b_upper.SetInt32("key", 200));
-    vector<HashBucketSchema> hash_schemas_4 = { { {"key"}, 4, 0 } };
-    vector<vector<HashBucketSchema>> range_hash_schema =
-        { std::move(hash_schemas_4) };
+    HashBucketSchemas hash_schemas_4 = { { {"key"}, 4, 0 } };
+    PerRangeHashBucketSchemas range_hash_schema = { std::move(hash_schemas_4) 
};
     Status s = CreateTable(kTableName, kTableSchema, {},
                            { { a_lower, a_upper }, { b_lower, b_upper }, },
                            none, none, none, range_hash_schema);
@@ -933,9 +934,9 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     ASSERT_OK(split.SetInt32("key", 1));
     ASSERT_OK(split.SetInt32("val", 1));
     Status s = CreateTable(kTableName, kTableSchema, { split }, {}, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
     ASSERT_STR_CONTAINS(s.ToString(),
-                        "Invalid argument: split rows may only contain values "
+                        "split rows may only contain values "
                         "for range partitioned columns: val");
   }
 
@@ -950,8 +951,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     ASSERT_OK(b_upper.SetInt32("key", 150));
     Status s = CreateTable(kTableName, kTableSchema, { }, { { a_lower, a_upper 
},
                                                             { b_lower, b_upper 
} }, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument: overlapping range 
partition");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "overlapping range partition");
   }
   { // Split row out of bounds (above).
     KuduPartialRow bound_lower(&kTableSchema);
@@ -964,8 +965,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
 
     Status s = CreateTable(kTableName, kTableSchema, { split },
                            { { bound_lower, bound_upper } }, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument: split out of bounds");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "split out of bounds");
   }
   { // Split row out of bounds (below).
     KuduPartialRow bound_lower(&kTableSchema);
@@ -978,8 +979,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
 
     Status s = CreateTable(kTableName, kTableSchema, { split },
                            { { bound_lower, bound_upper } }, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument: split out of bounds");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "split out of bounds");
   }
   { // Lower bound greater than upper bound.
     KuduPartialRow bound_lower(&kTableSchema);
@@ -988,10 +989,10 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
     ASSERT_OK(bound_upper.SetInt32("key", 0));
 
     Status s = CreateTable(kTableName, kTableSchema, { }, { { bound_lower, 
bound_upper } }, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(),
-                        "Invalid argument: range partition lower bound must be 
"
-                        "less than the upper bound");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(
+        s.ToString(),
+        "range partition lower bound must be less than the upper bound");
   }
   { // Lower bound equals upper bound.
     KuduPartialRow bound_lower(&kTableSchema);
@@ -1000,10 +1001,10 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) 
{
     ASSERT_OK(bound_upper.SetInt32("key", 0));
 
     Status s = CreateTable(kTableName, kTableSchema, { }, { { bound_lower, 
bound_upper } }, none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(),
-                        "Invalid argument: range partition lower bound must be 
"
-                        "less than the upper bound");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(
+        s.ToString(),
+        "range partition lower bound must be less than the upper bound");
   }
   { // Split equals lower bound
     KuduPartialRow bound_lower(&kTableSchema);
@@ -1016,8 +1017,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
 
     Status s = CreateTable(kTableName, kTableSchema, { split }, { { 
bound_lower, bound_upper } },
                            none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument: split matches lower 
or upper bound");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "split matches lower or upper bound");
   }
   { // Split equals upper bound
     KuduPartialRow bound_lower(&kTableSchema);
@@ -1030,8 +1031,8 @@ TEST_F(MasterTest, TestCreateTableCheckRangeInvariants) {
 
     Status s = CreateTable(kTableName, kTableSchema, { split }, { { 
bound_lower, bound_upper } },
                            none);
-    ASSERT_TRUE(s.IsInvalidArgument());
-    ASSERT_STR_CONTAINS(s.ToString(), "Invalid argument: split matches lower 
or upper bound");
+    ASSERT_TRUE(s.IsInvalidArgument()) << s.ToString();
+    ASSERT_STR_CONTAINS(s.ToString(), "split matches lower or upper bound");
   }
 }
 
@@ -1160,7 +1161,7 @@ TEST_F(MasterTest, 
NonPrimaryKeyColumnsForPerRangeCustomHashSchema) {
   KuduPartialRow upper(&kTableSchema);
   ASSERT_OK(lower.SetInt32("key", 0));
   ASSERT_OK(upper.SetInt32("key", 100));
-  vector<vector<HashBucketSchema>> range_hash_schema{{{{"int32_val"}, 2, 0}}};
+  PerRangeHashBucketSchemas range_hash_schema{{{{"int32_val"}, 2, 0}}};
   const auto s = CreateTable(
       kTableName, kTableSchema, {}, { { lower, upper } },
       none, none, none, range_hash_schema);

Reply via email to