This is an automated email from the ASF dual-hosted git repository.
alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/master by this push:
new 0367a53 [common] simplify PartitionSchema methods' signatures
0367a53 is described below
commit 0367a53a6385470e21246fbab61ecd3084f20cf8
Author: Alexey Serbin <[email protected]>
AuthorDate: Wed Aug 4 16:41:48 2021 -0700
[common] simplify PartitionSchema methods' signatures
While working on the key encoding code, I noticed that many methods
return a Status object even though they cannot return anything but
Status::OK(). This patch updates the signatures of those methods
and their corresponding call sites accordingly.
This changelist doesn't contain any functional changes.
Change-Id: I75bc4329e93f83fbad8f0120ee1d4979f15c0aed
Reviewed-on: http://gerrit.cloudera.org:8080/17754
Reviewed-by: Andrew Wong <[email protected]>
Tested-by: Kudu Jenkins
Reviewed-by: Mahesh Reddy <[email protected]>
---
src/kudu/client/batcher.cc | 12 +--
src/kudu/client/partitioner-internal.cc | 3 +-
src/kudu/common/partition-test.cc | 21 ++--
src/kudu/common/partition.cc | 160 +++++++++++++----------------
src/kudu/common/partition.h | 74 ++++++-------
src/kudu/common/scan_spec.cc | 17 ++-
src/kudu/common/schema.h | 4 +-
src/kudu/tablet/tablet.cc | 8 +-
src/kudu/transactions/txn_system_client.cc | 11 +-
9 files changed, 131 insertions(+), 179 deletions(-)
diff --git a/src/kudu/client/batcher.cc b/src/kudu/client/batcher.cc
index 9fdc872..a50602d 100644
--- a/src/kudu/client/batcher.cc
+++ b/src/kudu/client/batcher.cc
@@ -319,9 +319,7 @@ WriteRpc::WriteRpc(const scoped_refptr<Batcher>& batcher,
const Partition& partition = op->tablet->partition();
const PartitionSchema& partition_schema = table()->partition_schema();
const KuduPartialRow& row = op->write_op->row();
- bool partition_contains_row;
- CHECK(partition_schema.PartitionContainsRow(partition, row,
&partition_contains_row).ok());
- CHECK(partition_contains_row)
+ CHECK(partition_schema.PartitionContainsRow(partition, row))
<< "Row " << partition_schema.PartitionKeyDebugString(row)
<< " not in partition " <<
partition_schema.PartitionDebugString(partition, *schema);
#endif
@@ -736,8 +734,6 @@ Status Batcher::Add(KuduWriteOperation* write_op) {
// As soon as we get the op, start looking up where it belongs,
// so that when the user calls Flush, we are ready to go.
InFlightOp* op = arena_.NewObject<InFlightOp>();
- string partition_key;
-
RETURN_NOT_OK(write_op->table_->partition_schema().EncodeKey(write_op->row(),
&partition_key));
op->write_op.reset(write_op);
op->state = InFlightOp::kLookingUpTablet;
@@ -750,9 +746,11 @@ Status Batcher::Add(KuduWriteOperation* write_op) {
MonoTime deadline = ComputeDeadlineUnlocked();
++outstanding_lookups_;
scoped_refptr<Batcher> self(this);
+ const auto* table = write_op->table();
+ DCHECK(table);
client_->data_->meta_cache_->LookupTabletByKey(
- op->write_op->table(),
- std::move(partition_key),
+ table,
+ table->partition_schema().EncodeKey(write_op->row()),
deadline,
MetaCache::LookupType::kPoint,
&op->tablet,
diff --git a/src/kudu/client/partitioner-internal.cc
b/src/kudu/client/partitioner-internal.cc
index 09ba399..87788d9 100644
--- a/src/kudu/client/partitioner-internal.cc
+++ b/src/kudu/client/partitioner-internal.cc
@@ -76,8 +76,7 @@ Status KuduPartitionerBuilder::Data::Build(KuduPartitioner**
partitioner) {
Status KuduPartitioner::Data::PartitionRow(
const KuduPartialRow& row, int* partition) {
- tmp_buf_.clear();
- RETURN_NOT_OK(table_->data_->partition_schema_.EncodeKey(row, &tmp_buf_));
+ tmp_buf_ = table_->data_->partition_schema_.EncodeKey(row);
*partition = FindFloorOrDie(partitions_by_start_key_, tmp_buf_);
return Status::OK();
}
diff --git a/src/kudu/common/partition-test.cc
b/src/kudu/common/partition-test.cc
index e405b55..de465bf 100644
--- a/src/kudu/common/partition-test.cc
+++ b/src/kudu/common/partition-test.cc
@@ -215,10 +215,9 @@ TEST_F(PartitionTest, TestPartitionKeyEncoding) {
partition_schema.DebugString(schema));
{
- string key;
KuduPartialRow row(&schema);
ASSERT_OK(row.SetInt32("a", 0));
- ASSERT_OK(partition_schema.EncodeKey(row, &key));
+ string key = partition_schema.EncodeKey(row);
EXPECT_EQ(string("\0\0\0\0" // hash(0, "")
"\0\0\0\x14" // hash("")
@@ -231,10 +230,9 @@ TEST_F(PartitionTest, TestPartitionKeyEncoding) {
}
{
- string key;
KuduPartialRow row(&schema);
ASSERT_OK(row.SetInt32("a", 1));
- ASSERT_OK(partition_schema.EncodeKey(row, &key));
+ string key = partition_schema.EncodeKey(row);
EXPECT_EQ(string("\0\0\0\x5" // hash(1, "")
"\0\0\0\x14" // hash("")
@@ -248,12 +246,11 @@ TEST_F(PartitionTest, TestPartitionKeyEncoding) {
}
{
- string key;
KuduPartialRow row(&schema);
ASSERT_OK(row.SetInt32("a", 0));
ASSERT_OK(row.SetStringCopy("b", "b"));
ASSERT_OK(row.SetStringCopy("c", "c"));
- ASSERT_OK(partition_schema.EncodeKey(row, &key));
+ string key = partition_schema.EncodeKey(row);
EXPECT_EQ(string("\0\0\0\x1A" // hash(0, "b")
"\0\0\0\x1D" // hash("c")
@@ -268,12 +265,11 @@ TEST_F(PartitionTest, TestPartitionKeyEncoding) {
}
{
- string key;
KuduPartialRow row(&schema);
ASSERT_OK(row.SetInt32("a", 1));
ASSERT_OK(row.SetStringCopy("b", "b"));
ASSERT_OK(row.SetStringCopy("c", "c"));
- ASSERT_OK(partition_schema.EncodeKey(row, &key));
+ string key = partition_schema.EncodeKey(row);
EXPECT_EQ(string("\0\0\0\x0" // hash(1, "b")
"\0\0\0\x1D" // hash("c")
@@ -290,12 +286,11 @@ TEST_F(PartitionTest, TestPartitionKeyEncoding) {
{
// Check that row values are redacted when the log redact flag is set.
ASSERT_NE("", gflags::SetCommandLineOption("redact", "log"));
- string key;
KuduPartialRow row(&schema);
ASSERT_OK(row.SetInt32("a", 1));
ASSERT_OK(row.SetStringCopy("b", "b"));
ASSERT_OK(row.SetStringCopy("c", "c"));
- ASSERT_OK(partition_schema.EncodeKey(row, &key));
+ string key = partition_schema.EncodeKey(row);
string expected =
R"(HASH (a, b): 0, HASH (c): 29, RANGE (a, b, c): (<redacted>,
<redacted>, <redacted>))";
@@ -525,14 +520,12 @@ TEST_F(PartitionTest, TestCreatePartitions) {
ASSERT_OK(split_a.SetStringCopy("a", "a1"));
ASSERT_OK(split_a.SetStringCopy("b", "b1"));
ASSERT_OK(split_a.SetStringCopy("c", "c1"));
- string partition_key_a;
- ASSERT_OK(partition_schema.EncodeKey(split_a, &partition_key_a));
+ string partition_key_a = partition_schema.EncodeKey(split_a);
KuduPartialRow split_b(&schema);
ASSERT_OK(split_b.SetStringCopy("a", "a2"));
ASSERT_OK(split_b.SetStringCopy("b", "b2"));
- string partition_key_b;
- ASSERT_OK(partition_schema.EncodeKey(split_b, &partition_key_b));
+ string partition_key_b = partition_schema.EncodeKey(split_b);
// Split keys need not be passed in sorted order.
vector<KuduPartialRow> split_rows = { split_b, split_a };
diff --git a/src/kudu/common/partition.cc b/src/kudu/common/partition.cc
index f2b5c55..950fdc1 100644
--- a/src/kudu/common/partition.cc
+++ b/src/kudu/common/partition.cc
@@ -306,27 +306,30 @@ Status PartitionSchema::ToPB(const Schema& schema,
PartitionSchemaPB* pb) const
}
template<typename Row>
-Status PartitionSchema::EncodeKeyImpl(const Row& row, string* buf) const {
+void PartitionSchema::EncodeKeyImpl(const Row& row, string* buf) const {
// TODO(aserbin): update the implementation and remove the DCHECK() below
DCHECK(ranges_with_hash_schemas_.empty())
<< "ranges with custom hash schemas are not yet supported";
const KeyEncoder<string>& hash_encoder =
GetKeyEncoder<string>(GetTypeInfo(UINT32));
for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
- int32_t bucket;
- RETURN_NOT_OK(BucketForRow(row, hash_bucket_schema, &bucket));
+ const int32_t bucket = BucketForRow(row, hash_bucket_schema);
hash_encoder.Encode(&bucket, buf);
}
return EncodeColumns(row, range_schema_.column_ids, buf);
}
-Status PartitionSchema::EncodeKey(const KuduPartialRow& row, string* buf)
const {
- return EncodeKeyImpl(row, buf);
+string PartitionSchema::EncodeKey(const KuduPartialRow& row) const {
+ string encoded_key;
+ EncodeKeyImpl(row, &encoded_key);
+ return encoded_key;
}
-Status PartitionSchema::EncodeKey(const ConstContiguousRow& row, string* buf)
const {
- return EncodeKeyImpl(row, buf);
+string PartitionSchema::EncodeKey(const ConstContiguousRow& row) const {
+ string encoded_key;
+ EncodeKeyImpl(row, &encoded_key);
+ return encoded_key;
}
Status PartitionSchema::EncodeRangeKey(const KuduPartialRow& row,
@@ -351,7 +354,8 @@ Status PartitionSchema::EncodeRangeKey(const
KuduPartialRow& row,
if (contains_no_columns) {
return Status::OK();
}
- return EncodeColumns(row, range_schema_.column_ids, key);
+ EncodeColumns(row, range_schema_.column_ids, key);
+ return Status::OK();
}
Status PartitionSchema::EncodeRangeSplits(const vector<KuduPartialRow>&
split_rows,
@@ -656,89 +660,75 @@ Status PartitionSchema::CreatePartitions(
}
template<typename Row>
-Status PartitionSchema::PartitionContainsRowImpl(const Partition& partition,
- const Row& row,
- bool* contains) const {
+bool PartitionSchema::PartitionContainsRowImpl(const Partition& partition,
+ const Row& row) const {
CHECK_EQ(partition.hash_buckets().size(), hash_bucket_schemas_.size());
for (int i = 0; i < hash_bucket_schemas_.size(); i++) {
- RETURN_NOT_OK(HashPartitionContainsRowImpl(partition, row, i, contains));
- if (!*contains) {
- return Status::OK();
+ if (!HashPartitionContainsRowImpl(partition, row, i)) {
+ return false;
}
}
- RETURN_NOT_OK(RangePartitionContainsRowImpl(partition, row, contains));
-
- return Status::OK();
+ return RangePartitionContainsRowImpl(partition, row);
}
template<typename Row>
-Status PartitionSchema::HashPartitionContainsRowImpl(const Partition&
partition,
- const Row& row,
- int hash_idx,
- bool* contains) const {
+bool PartitionSchema::HashPartitionContainsRowImpl(const Partition& partition,
+ const Row& row,
+ int hash_idx) const {
DCHECK_EQ(partition.hash_buckets().size(), hash_bucket_schemas_.size());
const HashBucketSchema& hash_bucket_schema = hash_bucket_schemas_[hash_idx];
- int32_t bucket = -1;
- RETURN_NOT_OK(BucketForRow(row, hash_bucket_schema, &bucket));
- *contains = (partition.hash_buckets()[hash_idx] == bucket);
- return Status::OK();
+ const int32_t bucket = BucketForRow(row, hash_bucket_schema);
+ return partition.hash_buckets()[hash_idx] == bucket;
}
template<typename Row>
-Status PartitionSchema::RangePartitionContainsRowImpl(const Partition&
partition,
- const Row& row,
- bool* contains) const {
- string range_partition_key;
+bool PartitionSchema::RangePartitionContainsRowImpl(
+ const Partition& partition, const Row& row) const {
// If range partition is not used, column_ids would be empty and
// EncodedColumn() would return immediately.
- RETURN_NOT_OK(EncodeColumns(row, range_schema_.column_ids,
&range_partition_key));
+ string range_partition_key;
+ EncodeColumns(row, range_schema_.column_ids, &range_partition_key);
// If all of the hash buckets match, then the row is contained in the
// partition if the row is gte the lower bound; and if there is no upper
// bound, or the row is lt the upper bound.
const auto key = Slice(range_partition_key);
- *contains = (partition.range_key_start() <= key) &&
+ return
+ (partition.range_key_start() <= key) &&
(partition.range_key_end().empty() || key < partition.range_key_end());
- return Status::OK();
}
-Status PartitionSchema::PartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- bool* contains) const {
- return PartitionContainsRowImpl(partition, row, contains);
+bool PartitionSchema::PartitionContainsRow(const Partition& partition,
+ const KuduPartialRow& row) const {
+ return PartitionContainsRowImpl(partition, row);
}
-Status PartitionSchema::PartitionContainsRow(const Partition& partition,
- const ConstContiguousRow& row,
- bool* contains) const {
- return PartitionContainsRowImpl(partition, row, contains);
+bool PartitionSchema::PartitionContainsRow(const Partition& partition,
+ const ConstContiguousRow& row)
const {
+ return PartitionContainsRowImpl(partition, row);
}
-Status PartitionSchema::HashPartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- int hash_idx,
- bool* contains) const {
- return HashPartitionContainsRowImpl(partition, row, hash_idx, contains);
+bool PartitionSchema::HashPartitionContainsRow(const Partition& partition,
+ const KuduPartialRow& row,
+ int hash_idx) const {
+ return HashPartitionContainsRowImpl(partition, row, hash_idx);
}
-Status PartitionSchema::HashPartitionContainsRow(const Partition& partition,
- const ConstContiguousRow& row,
- int hash_idx,
- bool* contains) const {
- return HashPartitionContainsRowImpl(partition, row, hash_idx, contains);
+bool PartitionSchema::HashPartitionContainsRow(const Partition& partition,
+ const ConstContiguousRow& row,
+ int hash_idx) const {
+ return HashPartitionContainsRowImpl(partition, row, hash_idx);
}
-Status PartitionSchema::RangePartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- bool* contains) const {
- return RangePartitionContainsRowImpl(partition, row, contains);
+bool PartitionSchema::RangePartitionContainsRow(
+ const Partition& partition, const KuduPartialRow& row) const {
+ return RangePartitionContainsRowImpl(partition, row);
}
-Status PartitionSchema::RangePartitionContainsRow(const Partition& partition,
- const ConstContiguousRow&
row,
- bool* contains) const {
- return RangePartitionContainsRowImpl(partition, row, contains);
+bool PartitionSchema::RangePartitionContainsRow(
+ const Partition& partition, const ConstContiguousRow& row) const {
+ return RangePartitionContainsRowImpl(partition, row);
}
Status PartitionSchema::DecodeRangeKey(Slice* encoded_key,
@@ -873,18 +863,13 @@ string PartitionSchema::PartitionDebugString(const
Partition& partition,
template<typename Row>
string PartitionSchema::PartitionKeyDebugStringImpl(const Row& row) const {
vector<string> components;
-
+ components.reserve(hash_bucket_schemas_.size() +
+ range_schema_.column_ids.size());
for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
- int32_t bucket;
- Status s = BucketForRow(row, hash_bucket_schema, &bucket);
- if (s.ok()) {
- components.emplace_back(
- Substitute("HASH ($0): $1",
- ColumnIdsToColumnNames(*row.schema(),
hash_bucket_schema.column_ids),
- bucket));
- } else {
- components.emplace_back(Substitute("<hash-error: $0>", s.ToString()));
- }
+ components.emplace_back(Substitute(
+ "HASH ($0): $1",
+ ColumnIdsToColumnNames(*row.schema(), hash_bucket_schema.column_ids),
+ BucketForRow(row, hash_bucket_schema)));
}
if (!range_schema_.column_ids.empty()) {
@@ -1143,9 +1128,9 @@ bool PartitionSchema::operator==(const PartitionSchema&
rhs) const {
}
// Encodes the specified primary key columns of the supplied row into the
buffer.
-Status PartitionSchema::EncodeColumns(const ConstContiguousRow& row,
- const vector<ColumnId>& column_ids,
- string* buf) {
+void PartitionSchema::EncodeColumns(const ConstContiguousRow& row,
+ const vector<ColumnId>& column_ids,
+ string* buf) {
for (size_t i = 0; i < column_ids.size(); ++i) {
ColumnId column_id = column_ids[i];
auto column_idx = row.schema()->find_column_by_id(column_id);
@@ -1154,13 +1139,12 @@ Status PartitionSchema::EncodeColumns(const
ConstContiguousRow& row,
GetKeyEncoder<string>(type).Encode(
row.cell_ptr(column_idx), i + 1 == column_ids.size(), buf);
}
- return Status::OK();
}
// Encodes the specified primary key columns of the supplied row into the
buffer.
-Status PartitionSchema::EncodeColumns(const KuduPartialRow& row,
- const vector<ColumnId>& column_ids,
- string* buf) {
+void PartitionSchema::EncodeColumns(const KuduPartialRow& row,
+ const vector<ColumnId>& column_ids,
+ string* buf) {
for (size_t i = 0; i < column_ids.size(); ++i) {
auto column_idx = row.schema()->find_column_by_id(column_ids[i]);
CHECK(column_idx != Schema::kColumnNotFound);
@@ -1177,7 +1161,6 @@ Status PartitionSchema::EncodeColumns(const
KuduPartialRow& row,
cont_row.cell_ptr(column_idx), i + 1 == column_ids.size(), buf);
}
}
- return Status::OK();
}
int32_t PartitionSchema::BucketForEncodedColumns(const string&
encoded_hash_columns,
@@ -1244,14 +1227,13 @@ Status PartitionSchema::ValidateHashBucketSchemas(const
Schema& schema,
}
template<typename Row>
-Status PartitionSchema::BucketForRow(const Row& row,
- const HashBucketSchema&
hash_bucket_schema,
- int32_t* bucket) {
+int32_t PartitionSchema::BucketForRow(
+ const Row& row, const HashBucketSchema& hash_bucket_schema) {
string buf;
- RETURN_NOT_OK(EncodeColumns(row, hash_bucket_schema.column_ids, &buf));
- uint64_t hash = HashUtil::MurmurHash2_64(buf.data(), buf.length(),
hash_bucket_schema.seed);
- *bucket = hash % static_cast<uint64_t>(hash_bucket_schema.num_buckets);
- return Status::OK();
+ EncodeColumns(row, hash_bucket_schema.column_ids, &buf);
+ uint64_t hash = HashUtil::MurmurHash2_64(
+ buf.data(), buf.length(), hash_bucket_schema.seed);
+ return hash % static_cast<uint64_t>(hash_bucket_schema.num_buckets);
}
//------------------------------------------------------------
@@ -1260,14 +1242,12 @@ Status PartitionSchema::BucketForRow(const Row& row,
//------------------------------------------------------------
template
-Status PartitionSchema::BucketForRow(const KuduPartialRow& row,
- const HashBucketSchema&
hash_bucket_schema,
- int32_t* bucket);
+int32_t PartitionSchema::BucketForRow(const KuduPartialRow& row,
+ const HashBucketSchema&
hash_bucket_schema);
template
-Status PartitionSchema::BucketForRow(const ConstContiguousRow& row,
- const HashBucketSchema&
hash_bucket_schema,
- int32_t* bucket);
+int32_t PartitionSchema::BucketForRow(const ConstContiguousRow& row,
+ const HashBucketSchema&
hash_bucket_schema);
void PartitionSchema::Clear() {
hash_bucket_schemas_.clear();
diff --git a/src/kudu/common/partition.h b/src/kudu/common/partition.h
index bea781a..0bfdae2 100644
--- a/src/kudu/common/partition.h
+++ b/src/kudu/common/partition.h
@@ -182,8 +182,8 @@ class PartitionSchema {
// Appends the row's encoded partition key into the provided buffer.
// On failure, the buffer may have data partially appended.
- Status EncodeKey(const KuduPartialRow& row, std::string* buf) const
WARN_UNUSED_RESULT;
- Status EncodeKey(const ConstContiguousRow& row, std::string* buf) const
WARN_UNUSED_RESULT;
+ std::string EncodeKey(const KuduPartialRow& row) const;
+ std::string EncodeKey(const ConstContiguousRow& row) const;
// Creates the set of table partitions for a partition schema and collection
// of split rows and split bounds.
@@ -206,30 +206,24 @@ class PartitionSchema {
std::vector<Partition>* partitions) const WARN_UNUSED_RESULT;
// Tests if the partition contains the row.
- Status PartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- bool* contains) const WARN_UNUSED_RESULT;
- Status PartitionContainsRow(const Partition& partition,
- const ConstContiguousRow& row,
- bool* contains) const WARN_UNUSED_RESULT;
+ bool PartitionContainsRow(const Partition& partition,
+ const KuduPartialRow& row) const;
+ bool PartitionContainsRow(const Partition& partition,
+ const ConstContiguousRow& row) const;
// Tests if the hash partition contains the row with given hash_idx.
- Status HashPartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- int hash_idx,
- bool* contains) const WARN_UNUSED_RESULT;
- Status HashPartitionContainsRow(const Partition& partition,
- const ConstContiguousRow& row,
- int hash_idx,
- bool* contains) const WARN_UNUSED_RESULT;
+ bool HashPartitionContainsRow(const Partition& partition,
+ const KuduPartialRow& row,
+ int hash_idx) const;
+ bool HashPartitionContainsRow(const Partition& partition,
+ const ConstContiguousRow& row,
+ int hash_idx) const;
// Tests if the range partition contains the row.
- Status RangePartitionContainsRow(const Partition& partition,
- const KuduPartialRow& row,
- bool* contains) const WARN_UNUSED_RESULT;
- Status RangePartitionContainsRow(const Partition& partition,
- const ConstContiguousRow& row,
- bool* contains) const WARN_UNUSED_RESULT;
+ bool RangePartitionContainsRow(const Partition& partition,
+ const KuduPartialRow& row) const;
+ bool RangePartitionContainsRow(const Partition& partition,
+ const ConstContiguousRow& row) const;
// Returns a text description of the partition suitable for debug printing.
//
@@ -336,15 +330,15 @@ class PartitionSchema {
// Encodes the specified columns of a row into lexicographic sort-order
// preserving format.
- static Status EncodeColumns(const KuduPartialRow& row,
- const std::vector<ColumnId>& column_ids,
- std::string* buf);
+ static void EncodeColumns(const KuduPartialRow& row,
+ const std::vector<ColumnId>& column_ids,
+ std::string* buf);
// Encodes the specified columns of a row into lexicographic sort-order
// preserving format.
- static Status EncodeColumns(const ConstContiguousRow& row,
- const std::vector<ColumnId>& column_ids,
- std::string* buf);
+ static void EncodeColumns(const ConstContiguousRow& row,
+ const std::vector<ColumnId>& column_ids,
+ std::string* buf);
// Returns the hash bucket of the encoded hash column. The encoded columns
must match the
// columns of the hash bucket schema.
@@ -362,9 +356,8 @@ class PartitionSchema {
// Assigns the row to a hash bucket according to the hash schema.
template<typename Row>
- static Status BucketForRow(const Row& row,
- const HashBucketSchema& hash_bucket_schema,
- int32_t* bucket);
+ static int32_t BucketForRow(const Row& row,
+ const HashBucketSchema& hash_bucket_schema);
// PartitionKeyDebugString implementation for row types.
template<typename Row>
@@ -372,26 +365,23 @@ class PartitionSchema {
// Private templated helper for PartitionContainsRow.
template<typename Row>
- Status PartitionContainsRowImpl(const Partition& partition,
- const Row& row,
- bool* contains) const;
+ bool PartitionContainsRowImpl(const Partition& partition,
+ const Row& row) const;
// Private templated helper for HashPartitionContainsRow.
template<typename Row>
- Status HashPartitionContainsRowImpl(const Partition& partition,
- const Row& row,
- int hash_idx,
- bool* contains) const;
+ bool HashPartitionContainsRowImpl(const Partition& partition,
+ const Row& row,
+ int hash_idx) const;
// Private templated helper for RangePartitionContainsRow.
template<typename Row>
- Status RangePartitionContainsRowImpl(const Partition& partition,
- const Row& row,
- bool* contains) const;
+ bool RangePartitionContainsRowImpl(const Partition& partition,
+ const Row& row) const;
// Private templated helper for EncodeKey.
template<typename Row>
- Status EncodeKeyImpl(const Row& row, std::string* buf) const;
+ void EncodeKeyImpl(const Row& row, std::string* buf) const;
// Returns true if all of the columns in the range partition key are unset in
// the row.
diff --git a/src/kudu/common/scan_spec.cc b/src/kudu/common/scan_spec.cc
index f029f99..e421829 100644
--- a/src/kudu/common/scan_spec.cc
+++ b/src/kudu/common/scan_spec.cc
@@ -198,20 +198,15 @@ void ScanSpec::PruneInlistValuesIfPossible(const Schema&
schema,
if (!s.ok()) return false;
// If value is not in given hash partition, remove this value from
predicate values.
- if (hash_idx != -1) {
- bool is_value_in;
- s = partition_schema.HashPartitionContainsRow(partition, partial_row,
- hash_idx,
&is_value_in);
- if (!s.ok()) return false;
- if (!is_value_in) return true;
+ if (hash_idx != -1 && !partition_schema.HashPartitionContainsRow(
+ partition, partial_row, hash_idx)) {
+ return true;
}
// If value is not in given range partition, remove this value from
predicate values.
- if (is_col_single_range_schema) {
- bool is_value_in;
- s = partition_schema.RangePartitionContainsRow(partition,
partial_row, &is_value_in);
- if (!s.ok()) return false;
- if (!is_value_in) return true;
+ if (is_col_single_range_schema &&
+ !partition_schema.RangePartitionContainsRow(partition,
partial_row)) {
+ return true;
}
return false;
}), predicate_values->end());
diff --git a/src/kudu/common/schema.h b/src/kudu/common/schema.h
index e44e018..3371ba4 100644
--- a/src/kudu/common/schema.h
+++ b/src/kudu/common/schema.h
@@ -221,7 +221,9 @@ class ColumnSchema {
// ColumnSchema col_c("c", INT32, false, &default_i32);
// Slice default_str("Hello");
// ColumnSchema col_d("d", STRING, false, &default_str);
- ColumnSchema(std::string name, DataType type, bool is_nullable = false,
+ ColumnSchema(std::string name,
+ DataType type,
+ bool is_nullable = false,
const void* read_default = nullptr,
const void* write_default = nullptr,
ColumnStorageAttributes attributes = ColumnStorageAttributes(),
diff --git a/src/kudu/tablet/tablet.cc b/src/kudu/tablet/tablet.cc
index cd60461..50106bc 100644
--- a/src/kudu/tablet/tablet.cc
+++ b/src/kudu/tablet/tablet.cc
@@ -602,12 +602,8 @@ Status Tablet::AcquireTxnLock(int64_t txn_id,
WriteOpState* op_state) {
}
Status Tablet::CheckRowInTablet(const ConstContiguousRow& row) const {
- bool contains_row;
-
RETURN_NOT_OK(metadata_->partition_schema().PartitionContainsRow(metadata_->partition(),
- row,
-
&contains_row));
-
- if (PREDICT_FALSE(!contains_row)) {
+ if (PREDICT_FALSE(!metadata_->partition_schema().PartitionContainsRow(
+ metadata_->partition(), row))) {
return Status::NotFound(
Substitute("Row not in tablet partition. Partition: '$0', row: '$1'.",
metadata_->partition_schema().PartitionDebugString(metadata_->partition(),
diff --git a/src/kudu/transactions/txn_system_client.cc
b/src/kudu/transactions/txn_system_client.cc
index d8d2f07..87e16c0 100644
--- a/src/kudu/transactions/txn_system_client.cc
+++ b/src/kudu/transactions/txn_system_client.cc
@@ -336,16 +336,15 @@ Status
TxnSystemClient::CoordinateTransactionAsync(CoordinatorOpPB coordinate_tx
/*tablet=*/nullptr
}));
- string partition_key;
KuduPartialRow row(&TxnStatusTablet::GetSchema());
DCHECK(ctx->coordinate_txn_op.has_txn_id());
- RETURN_NOT_OK(row.SetInt64(TxnStatusTablet::kTxnIdColName,
ctx->coordinate_txn_op.txn_id()));
- RETURN_NOT_OK(ctx->table->partition_schema().EncodeKey(row, &partition_key));
-
+ RETURN_NOT_OK(row.SetInt64(TxnStatusTablet::kTxnIdColName,
+ ctx->coordinate_txn_op.txn_id()));
TxnStatusTabletContext* ctx_raw = ctx.release();
+ const auto* table = ctx_raw->table.get();
client_->data_->meta_cache_->LookupTabletByKey(
- ctx_raw->table.get(),
- std::move(partition_key),
+ table,
+ table->partition_schema().EncodeKey(row),
deadline,
MetaCache::LookupType::kPoint,
&ctx_raw->tablet,